##// END OF EJS Templates
sharesafe: add functionality to automatically downgrade shares...
Pulkit Goyal -
r46853:eec47efe default
parent child Browse files
Show More
@@ -1,2540 +1,2545 b''
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11 import re
11 import re
12
12
13 from . import (
13 from . import (
14 encoding,
14 encoding,
15 error,
15 error,
16 )
16 )
17
17
18
18
def loadconfigtable(ui, extname, configtable):
    """update config item known to the ui with the extension ones"""
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        # warn (via the devel machinery) about any item the extension
        # re-declares on top of an already-known one
        overlap = set(knownitems) & set(items)
        for key in sorted(overlap):
            msg = b"extension '%s' overwrite config item '%s.%s'"
            ui.develwarn(msg % (extname, section, key), config=b'warn-config')

        knownitems.update(items)
31
31
32
32
class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, match name using regular expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        # keep our own mutable copy of the alias sequence
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        # generic items are matched by pattern, so pre-compile the name;
        # concrete items carry no pattern at all
        self._re = re.compile(name) if generic else None
63
63
64
64
class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        # generic (pattern-based) items, kept separately for fallback lookup
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        # a concrete (non-generic) entry wins outright
        exact = super(itemregister, self).get(key)
        if exact is not None and not exact.generic:
            return exact

        # Otherwise fall back to the generic definitions, lowest
        # (priority, name) first.
        #
        # we use 'match' instead of 'search' to make the matching simpler
        # for people unfamiliar with regular expression. Having the match
        # rooted to the start of the string will produce less surprising
        # result for user writing simple regex for sub-attribute.
        #
        # For example using "color\..*" match produces an unsurprising
        # result, while using search could suddenly match apparently
        # unrelated configuration that happens to contains "color."
        # anywhere. This is a tradeoff where we favor requiring ".*" on
        # some match to avoid the need to prefix most pattern with "^".
        # The "^" seems more error prone.
        ordered = sorted(self._generics, key=lambda it: (it.priority, it.name))
        for candidate in ordered:
            if candidate._re.match(key):
                return candidate

        return None
104
104
105
105
# central registry of core config items: section name -> itemregister
coreitems = {}
107
107
108
108
def _register(configtable, *args, **kwargs):
    """Build a configitem from *args/**kwargs and record it in *configtable*.

    Raises error.ProgrammingError if the same section/name pair has
    already been registered.
    """
    item = configitem(*args, **kwargs)
    register = configtable.setdefault(item.section, itemregister())
    if item.name in register:
        msg = b"duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    register[item.name] = item
116
116
117
117
# special value for case where the default is derived from other values
# (callers compare against it by identity, never by value)
dynamicdefault = object()
120
120
121 # Registering actual config items
121 # Registering actual config items
122
122
123
123
def getitemregister(configtable):
    """Return a registration helper bound to *configtable*.

    The returned callable behaves like ``_register`` with the table
    pre-applied.
    """
    register = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    register.dynamicdefault = dynamicdefault
    return register
129
129
130
130
# registration helper for Mercurial's own (core) config items
coreconfigitem = getitemregister(coreitems)
132
132
133
133
def _registerdiffopts(section, configprefix=b''):
    """Register the standard set of diff options under *section*.

    Each option name is prefixed with *configprefix* (e.g. a prefix of
    ``b'commit.interactive.'`` yields ``commit.interactive.git``).
    """
    # (suffix, default) pairs, registered in this exact order
    _diffopts = [
        (b'nodates', False),
        (b'showfunc', False),
        (b'unified', None),
        (b'git', False),
        (b'ignorews', False),
        (b'ignorewsamount', False),
        (b'ignoreblanklines', False),
        (b'ignorewseol', False),
        (b'nobinary', False),
        (b'noprefix', False),
        (b'word-diff', False),
    ]
    for suffix, value in _diffopts:
        coreconfigitem(
            section,
            configprefix + suffix,
            default=value,
        )
190
190
191
191
192 coreconfigitem(
192 coreconfigitem(
193 b'alias',
193 b'alias',
194 b'.*',
194 b'.*',
195 default=dynamicdefault,
195 default=dynamicdefault,
196 generic=True,
196 generic=True,
197 )
197 )
198 coreconfigitem(
198 coreconfigitem(
199 b'auth',
199 b'auth',
200 b'cookiefile',
200 b'cookiefile',
201 default=None,
201 default=None,
202 )
202 )
203 _registerdiffopts(section=b'annotate')
203 _registerdiffopts(section=b'annotate')
204 # bookmarks.pushing: internal hack for discovery
204 # bookmarks.pushing: internal hack for discovery
205 coreconfigitem(
205 coreconfigitem(
206 b'bookmarks',
206 b'bookmarks',
207 b'pushing',
207 b'pushing',
208 default=list,
208 default=list,
209 )
209 )
210 # bundle.mainreporoot: internal hack for bundlerepo
210 # bundle.mainreporoot: internal hack for bundlerepo
211 coreconfigitem(
211 coreconfigitem(
212 b'bundle',
212 b'bundle',
213 b'mainreporoot',
213 b'mainreporoot',
214 default=b'',
214 default=b'',
215 )
215 )
216 coreconfigitem(
216 coreconfigitem(
217 b'censor',
217 b'censor',
218 b'policy',
218 b'policy',
219 default=b'abort',
219 default=b'abort',
220 experimental=True,
220 experimental=True,
221 )
221 )
222 coreconfigitem(
222 coreconfigitem(
223 b'chgserver',
223 b'chgserver',
224 b'idletimeout',
224 b'idletimeout',
225 default=3600,
225 default=3600,
226 )
226 )
227 coreconfigitem(
227 coreconfigitem(
228 b'chgserver',
228 b'chgserver',
229 b'skiphash',
229 b'skiphash',
230 default=False,
230 default=False,
231 )
231 )
232 coreconfigitem(
232 coreconfigitem(
233 b'cmdserver',
233 b'cmdserver',
234 b'log',
234 b'log',
235 default=None,
235 default=None,
236 )
236 )
237 coreconfigitem(
237 coreconfigitem(
238 b'cmdserver',
238 b'cmdserver',
239 b'max-log-files',
239 b'max-log-files',
240 default=7,
240 default=7,
241 )
241 )
242 coreconfigitem(
242 coreconfigitem(
243 b'cmdserver',
243 b'cmdserver',
244 b'max-log-size',
244 b'max-log-size',
245 default=b'1 MB',
245 default=b'1 MB',
246 )
246 )
247 coreconfigitem(
247 coreconfigitem(
248 b'cmdserver',
248 b'cmdserver',
249 b'max-repo-cache',
249 b'max-repo-cache',
250 default=0,
250 default=0,
251 experimental=True,
251 experimental=True,
252 )
252 )
253 coreconfigitem(
253 coreconfigitem(
254 b'cmdserver',
254 b'cmdserver',
255 b'message-encodings',
255 b'message-encodings',
256 default=list,
256 default=list,
257 )
257 )
258 coreconfigitem(
258 coreconfigitem(
259 b'cmdserver',
259 b'cmdserver',
260 b'track-log',
260 b'track-log',
261 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
261 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
262 )
262 )
263 coreconfigitem(
263 coreconfigitem(
264 b'cmdserver',
264 b'cmdserver',
265 b'shutdown-on-interrupt',
265 b'shutdown-on-interrupt',
266 default=True,
266 default=True,
267 )
267 )
268 coreconfigitem(
268 coreconfigitem(
269 b'color',
269 b'color',
270 b'.*',
270 b'.*',
271 default=None,
271 default=None,
272 generic=True,
272 generic=True,
273 )
273 )
274 coreconfigitem(
274 coreconfigitem(
275 b'color',
275 b'color',
276 b'mode',
276 b'mode',
277 default=b'auto',
277 default=b'auto',
278 )
278 )
279 coreconfigitem(
279 coreconfigitem(
280 b'color',
280 b'color',
281 b'pagermode',
281 b'pagermode',
282 default=dynamicdefault,
282 default=dynamicdefault,
283 )
283 )
284 coreconfigitem(
284 coreconfigitem(
285 b'command-templates',
285 b'command-templates',
286 b'graphnode',
286 b'graphnode',
287 default=None,
287 default=None,
288 alias=[(b'ui', b'graphnodetemplate')],
288 alias=[(b'ui', b'graphnodetemplate')],
289 )
289 )
290 coreconfigitem(
290 coreconfigitem(
291 b'command-templates',
291 b'command-templates',
292 b'log',
292 b'log',
293 default=None,
293 default=None,
294 alias=[(b'ui', b'logtemplate')],
294 alias=[(b'ui', b'logtemplate')],
295 )
295 )
296 coreconfigitem(
296 coreconfigitem(
297 b'command-templates',
297 b'command-templates',
298 b'mergemarker',
298 b'mergemarker',
299 default=(
299 default=(
300 b'{node|short} '
300 b'{node|short} '
301 b'{ifeq(tags, "tip", "", '
301 b'{ifeq(tags, "tip", "", '
302 b'ifeq(tags, "", "", "{tags} "))}'
302 b'ifeq(tags, "", "", "{tags} "))}'
303 b'{if(bookmarks, "{bookmarks} ")}'
303 b'{if(bookmarks, "{bookmarks} ")}'
304 b'{ifeq(branch, "default", "", "{branch} ")}'
304 b'{ifeq(branch, "default", "", "{branch} ")}'
305 b'- {author|user}: {desc|firstline}'
305 b'- {author|user}: {desc|firstline}'
306 ),
306 ),
307 alias=[(b'ui', b'mergemarkertemplate')],
307 alias=[(b'ui', b'mergemarkertemplate')],
308 )
308 )
309 coreconfigitem(
309 coreconfigitem(
310 b'command-templates',
310 b'command-templates',
311 b'pre-merge-tool-output',
311 b'pre-merge-tool-output',
312 default=None,
312 default=None,
313 alias=[(b'ui', b'pre-merge-tool-output-template')],
313 alias=[(b'ui', b'pre-merge-tool-output-template')],
314 )
314 )
315 coreconfigitem(
315 coreconfigitem(
316 b'command-templates',
316 b'command-templates',
317 b'oneline-summary',
317 b'oneline-summary',
318 default=None,
318 default=None,
319 )
319 )
320 coreconfigitem(
320 coreconfigitem(
321 b'command-templates',
321 b'command-templates',
322 b'oneline-summary.*',
322 b'oneline-summary.*',
323 default=dynamicdefault,
323 default=dynamicdefault,
324 generic=True,
324 generic=True,
325 )
325 )
326 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
326 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
327 coreconfigitem(
327 coreconfigitem(
328 b'commands',
328 b'commands',
329 b'commit.post-status',
329 b'commit.post-status',
330 default=False,
330 default=False,
331 )
331 )
332 coreconfigitem(
332 coreconfigitem(
333 b'commands',
333 b'commands',
334 b'grep.all-files',
334 b'grep.all-files',
335 default=False,
335 default=False,
336 experimental=True,
336 experimental=True,
337 )
337 )
338 coreconfigitem(
338 coreconfigitem(
339 b'commands',
339 b'commands',
340 b'merge.require-rev',
340 b'merge.require-rev',
341 default=False,
341 default=False,
342 )
342 )
343 coreconfigitem(
343 coreconfigitem(
344 b'commands',
344 b'commands',
345 b'push.require-revs',
345 b'push.require-revs',
346 default=False,
346 default=False,
347 )
347 )
348 coreconfigitem(
348 coreconfigitem(
349 b'commands',
349 b'commands',
350 b'resolve.confirm',
350 b'resolve.confirm',
351 default=False,
351 default=False,
352 )
352 )
353 coreconfigitem(
353 coreconfigitem(
354 b'commands',
354 b'commands',
355 b'resolve.explicit-re-merge',
355 b'resolve.explicit-re-merge',
356 default=False,
356 default=False,
357 )
357 )
358 coreconfigitem(
358 coreconfigitem(
359 b'commands',
359 b'commands',
360 b'resolve.mark-check',
360 b'resolve.mark-check',
361 default=b'none',
361 default=b'none',
362 )
362 )
363 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
363 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
364 coreconfigitem(
364 coreconfigitem(
365 b'commands',
365 b'commands',
366 b'show.aliasprefix',
366 b'show.aliasprefix',
367 default=list,
367 default=list,
368 )
368 )
369 coreconfigitem(
369 coreconfigitem(
370 b'commands',
370 b'commands',
371 b'status.relative',
371 b'status.relative',
372 default=False,
372 default=False,
373 )
373 )
374 coreconfigitem(
374 coreconfigitem(
375 b'commands',
375 b'commands',
376 b'status.skipstates',
376 b'status.skipstates',
377 default=[],
377 default=[],
378 experimental=True,
378 experimental=True,
379 )
379 )
380 coreconfigitem(
380 coreconfigitem(
381 b'commands',
381 b'commands',
382 b'status.terse',
382 b'status.terse',
383 default=b'',
383 default=b'',
384 )
384 )
385 coreconfigitem(
385 coreconfigitem(
386 b'commands',
386 b'commands',
387 b'status.verbose',
387 b'status.verbose',
388 default=False,
388 default=False,
389 )
389 )
390 coreconfigitem(
390 coreconfigitem(
391 b'commands',
391 b'commands',
392 b'update.check',
392 b'update.check',
393 default=None,
393 default=None,
394 )
394 )
395 coreconfigitem(
395 coreconfigitem(
396 b'commands',
396 b'commands',
397 b'update.requiredest',
397 b'update.requiredest',
398 default=False,
398 default=False,
399 )
399 )
400 coreconfigitem(
400 coreconfigitem(
401 b'committemplate',
401 b'committemplate',
402 b'.*',
402 b'.*',
403 default=None,
403 default=None,
404 generic=True,
404 generic=True,
405 )
405 )
406 coreconfigitem(
406 coreconfigitem(
407 b'convert',
407 b'convert',
408 b'bzr.saverev',
408 b'bzr.saverev',
409 default=True,
409 default=True,
410 )
410 )
411 coreconfigitem(
411 coreconfigitem(
412 b'convert',
412 b'convert',
413 b'cvsps.cache',
413 b'cvsps.cache',
414 default=True,
414 default=True,
415 )
415 )
416 coreconfigitem(
416 coreconfigitem(
417 b'convert',
417 b'convert',
418 b'cvsps.fuzz',
418 b'cvsps.fuzz',
419 default=60,
419 default=60,
420 )
420 )
421 coreconfigitem(
421 coreconfigitem(
422 b'convert',
422 b'convert',
423 b'cvsps.logencoding',
423 b'cvsps.logencoding',
424 default=None,
424 default=None,
425 )
425 )
426 coreconfigitem(
426 coreconfigitem(
427 b'convert',
427 b'convert',
428 b'cvsps.mergefrom',
428 b'cvsps.mergefrom',
429 default=None,
429 default=None,
430 )
430 )
431 coreconfigitem(
431 coreconfigitem(
432 b'convert',
432 b'convert',
433 b'cvsps.mergeto',
433 b'cvsps.mergeto',
434 default=None,
434 default=None,
435 )
435 )
436 coreconfigitem(
436 coreconfigitem(
437 b'convert',
437 b'convert',
438 b'git.committeractions',
438 b'git.committeractions',
439 default=lambda: [b'messagedifferent'],
439 default=lambda: [b'messagedifferent'],
440 )
440 )
441 coreconfigitem(
441 coreconfigitem(
442 b'convert',
442 b'convert',
443 b'git.extrakeys',
443 b'git.extrakeys',
444 default=list,
444 default=list,
445 )
445 )
446 coreconfigitem(
446 coreconfigitem(
447 b'convert',
447 b'convert',
448 b'git.findcopiesharder',
448 b'git.findcopiesharder',
449 default=False,
449 default=False,
450 )
450 )
451 coreconfigitem(
451 coreconfigitem(
452 b'convert',
452 b'convert',
453 b'git.remoteprefix',
453 b'git.remoteprefix',
454 default=b'remote',
454 default=b'remote',
455 )
455 )
456 coreconfigitem(
456 coreconfigitem(
457 b'convert',
457 b'convert',
458 b'git.renamelimit',
458 b'git.renamelimit',
459 default=400,
459 default=400,
460 )
460 )
461 coreconfigitem(
461 coreconfigitem(
462 b'convert',
462 b'convert',
463 b'git.saverev',
463 b'git.saverev',
464 default=True,
464 default=True,
465 )
465 )
466 coreconfigitem(
466 coreconfigitem(
467 b'convert',
467 b'convert',
468 b'git.similarity',
468 b'git.similarity',
469 default=50,
469 default=50,
470 )
470 )
471 coreconfigitem(
471 coreconfigitem(
472 b'convert',
472 b'convert',
473 b'git.skipsubmodules',
473 b'git.skipsubmodules',
474 default=False,
474 default=False,
475 )
475 )
476 coreconfigitem(
476 coreconfigitem(
477 b'convert',
477 b'convert',
478 b'hg.clonebranches',
478 b'hg.clonebranches',
479 default=False,
479 default=False,
480 )
480 )
481 coreconfigitem(
481 coreconfigitem(
482 b'convert',
482 b'convert',
483 b'hg.ignoreerrors',
483 b'hg.ignoreerrors',
484 default=False,
484 default=False,
485 )
485 )
486 coreconfigitem(
486 coreconfigitem(
487 b'convert',
487 b'convert',
488 b'hg.preserve-hash',
488 b'hg.preserve-hash',
489 default=False,
489 default=False,
490 )
490 )
491 coreconfigitem(
491 coreconfigitem(
492 b'convert',
492 b'convert',
493 b'hg.revs',
493 b'hg.revs',
494 default=None,
494 default=None,
495 )
495 )
496 coreconfigitem(
496 coreconfigitem(
497 b'convert',
497 b'convert',
498 b'hg.saverev',
498 b'hg.saverev',
499 default=False,
499 default=False,
500 )
500 )
501 coreconfigitem(
501 coreconfigitem(
502 b'convert',
502 b'convert',
503 b'hg.sourcename',
503 b'hg.sourcename',
504 default=None,
504 default=None,
505 )
505 )
506 coreconfigitem(
506 coreconfigitem(
507 b'convert',
507 b'convert',
508 b'hg.startrev',
508 b'hg.startrev',
509 default=None,
509 default=None,
510 )
510 )
511 coreconfigitem(
511 coreconfigitem(
512 b'convert',
512 b'convert',
513 b'hg.tagsbranch',
513 b'hg.tagsbranch',
514 default=b'default',
514 default=b'default',
515 )
515 )
516 coreconfigitem(
516 coreconfigitem(
517 b'convert',
517 b'convert',
518 b'hg.usebranchnames',
518 b'hg.usebranchnames',
519 default=True,
519 default=True,
520 )
520 )
521 coreconfigitem(
521 coreconfigitem(
522 b'convert',
522 b'convert',
523 b'ignoreancestorcheck',
523 b'ignoreancestorcheck',
524 default=False,
524 default=False,
525 experimental=True,
525 experimental=True,
526 )
526 )
527 coreconfigitem(
527 coreconfigitem(
528 b'convert',
528 b'convert',
529 b'localtimezone',
529 b'localtimezone',
530 default=False,
530 default=False,
531 )
531 )
532 coreconfigitem(
532 coreconfigitem(
533 b'convert',
533 b'convert',
534 b'p4.encoding',
534 b'p4.encoding',
535 default=dynamicdefault,
535 default=dynamicdefault,
536 )
536 )
537 coreconfigitem(
537 coreconfigitem(
538 b'convert',
538 b'convert',
539 b'p4.startrev',
539 b'p4.startrev',
540 default=0,
540 default=0,
541 )
541 )
542 coreconfigitem(
542 coreconfigitem(
543 b'convert',
543 b'convert',
544 b'skiptags',
544 b'skiptags',
545 default=False,
545 default=False,
546 )
546 )
547 coreconfigitem(
547 coreconfigitem(
548 b'convert',
548 b'convert',
549 b'svn.debugsvnlog',
549 b'svn.debugsvnlog',
550 default=True,
550 default=True,
551 )
551 )
552 coreconfigitem(
552 coreconfigitem(
553 b'convert',
553 b'convert',
554 b'svn.trunk',
554 b'svn.trunk',
555 default=None,
555 default=None,
556 )
556 )
557 coreconfigitem(
557 coreconfigitem(
558 b'convert',
558 b'convert',
559 b'svn.tags',
559 b'svn.tags',
560 default=None,
560 default=None,
561 )
561 )
562 coreconfigitem(
562 coreconfigitem(
563 b'convert',
563 b'convert',
564 b'svn.branches',
564 b'svn.branches',
565 default=None,
565 default=None,
566 )
566 )
567 coreconfigitem(
567 coreconfigitem(
568 b'convert',
568 b'convert',
569 b'svn.startrev',
569 b'svn.startrev',
570 default=0,
570 default=0,
571 )
571 )
572 coreconfigitem(
572 coreconfigitem(
573 b'debug',
573 b'debug',
574 b'dirstate.delaywrite',
574 b'dirstate.delaywrite',
575 default=0,
575 default=0,
576 )
576 )
577 coreconfigitem(
577 coreconfigitem(
578 b'defaults',
578 b'defaults',
579 b'.*',
579 b'.*',
580 default=None,
580 default=None,
581 generic=True,
581 generic=True,
582 )
582 )
583 coreconfigitem(
583 coreconfigitem(
584 b'devel',
584 b'devel',
585 b'all-warnings',
585 b'all-warnings',
586 default=False,
586 default=False,
587 )
587 )
588 coreconfigitem(
588 coreconfigitem(
589 b'devel',
589 b'devel',
590 b'bundle2.debug',
590 b'bundle2.debug',
591 default=False,
591 default=False,
592 )
592 )
593 coreconfigitem(
593 coreconfigitem(
594 b'devel',
594 b'devel',
595 b'bundle.delta',
595 b'bundle.delta',
596 default=b'',
596 default=b'',
597 )
597 )
598 coreconfigitem(
598 coreconfigitem(
599 b'devel',
599 b'devel',
600 b'cache-vfs',
600 b'cache-vfs',
601 default=None,
601 default=None,
602 )
602 )
603 coreconfigitem(
603 coreconfigitem(
604 b'devel',
604 b'devel',
605 b'check-locks',
605 b'check-locks',
606 default=False,
606 default=False,
607 )
607 )
608 coreconfigitem(
608 coreconfigitem(
609 b'devel',
609 b'devel',
610 b'check-relroot',
610 b'check-relroot',
611 default=False,
611 default=False,
612 )
612 )
613 coreconfigitem(
613 coreconfigitem(
614 b'devel',
614 b'devel',
615 b'default-date',
615 b'default-date',
616 default=None,
616 default=None,
617 )
617 )
618 coreconfigitem(
618 coreconfigitem(
619 b'devel',
619 b'devel',
620 b'deprec-warn',
620 b'deprec-warn',
621 default=False,
621 default=False,
622 )
622 )
623 coreconfigitem(
623 coreconfigitem(
624 b'devel',
624 b'devel',
625 b'disableloaddefaultcerts',
625 b'disableloaddefaultcerts',
626 default=False,
626 default=False,
627 )
627 )
628 coreconfigitem(
628 coreconfigitem(
629 b'devel',
629 b'devel',
630 b'warn-empty-changegroup',
630 b'warn-empty-changegroup',
631 default=False,
631 default=False,
632 )
632 )
633 coreconfigitem(
633 coreconfigitem(
634 b'devel',
634 b'devel',
635 b'legacy.exchange',
635 b'legacy.exchange',
636 default=list,
636 default=list,
637 )
637 )
638 coreconfigitem(
638 coreconfigitem(
639 b'devel',
639 b'devel',
640 b'persistent-nodemap',
640 b'persistent-nodemap',
641 default=False,
641 default=False,
642 )
642 )
643 coreconfigitem(
643 coreconfigitem(
644 b'devel',
644 b'devel',
645 b'servercafile',
645 b'servercafile',
646 default=b'',
646 default=b'',
647 )
647 )
648 coreconfigitem(
648 coreconfigitem(
649 b'devel',
649 b'devel',
650 b'serverexactprotocol',
650 b'serverexactprotocol',
651 default=b'',
651 default=b'',
652 )
652 )
653 coreconfigitem(
653 coreconfigitem(
654 b'devel',
654 b'devel',
655 b'serverrequirecert',
655 b'serverrequirecert',
656 default=False,
656 default=False,
657 )
657 )
658 coreconfigitem(
658 coreconfigitem(
659 b'devel',
659 b'devel',
660 b'strip-obsmarkers',
660 b'strip-obsmarkers',
661 default=True,
661 default=True,
662 )
662 )
663 coreconfigitem(
663 coreconfigitem(
664 b'devel',
664 b'devel',
665 b'warn-config',
665 b'warn-config',
666 default=None,
666 default=None,
667 )
667 )
668 coreconfigitem(
668 coreconfigitem(
669 b'devel',
669 b'devel',
670 b'warn-config-default',
670 b'warn-config-default',
671 default=None,
671 default=None,
672 )
672 )
673 coreconfigitem(
673 coreconfigitem(
674 b'devel',
674 b'devel',
675 b'user.obsmarker',
675 b'user.obsmarker',
676 default=None,
676 default=None,
677 )
677 )
678 coreconfigitem(
678 coreconfigitem(
679 b'devel',
679 b'devel',
680 b'warn-config-unknown',
680 b'warn-config-unknown',
681 default=None,
681 default=None,
682 )
682 )
683 coreconfigitem(
683 coreconfigitem(
684 b'devel',
684 b'devel',
685 b'debug.copies',
685 b'debug.copies',
686 default=False,
686 default=False,
687 )
687 )
688 coreconfigitem(
688 coreconfigitem(
689 b'devel',
689 b'devel',
690 b'debug.extensions',
690 b'debug.extensions',
691 default=False,
691 default=False,
692 )
692 )
693 coreconfigitem(
693 coreconfigitem(
694 b'devel',
694 b'devel',
695 b'debug.repo-filters',
695 b'debug.repo-filters',
696 default=False,
696 default=False,
697 )
697 )
698 coreconfigitem(
698 coreconfigitem(
699 b'devel',
699 b'devel',
700 b'debug.peer-request',
700 b'debug.peer-request',
701 default=False,
701 default=False,
702 )
702 )
703 coreconfigitem(
703 coreconfigitem(
704 b'devel',
704 b'devel',
705 b'discovery.randomize',
705 b'discovery.randomize',
706 default=True,
706 default=True,
707 )
707 )
708 _registerdiffopts(section=b'diff')
708 _registerdiffopts(section=b'diff')
709 coreconfigitem(
709 coreconfigitem(
710 b'email',
710 b'email',
711 b'bcc',
711 b'bcc',
712 default=None,
712 default=None,
713 )
713 )
714 coreconfigitem(
714 coreconfigitem(
715 b'email',
715 b'email',
716 b'cc',
716 b'cc',
717 default=None,
717 default=None,
718 )
718 )
719 coreconfigitem(
719 coreconfigitem(
720 b'email',
720 b'email',
721 b'charsets',
721 b'charsets',
722 default=list,
722 default=list,
723 )
723 )
724 coreconfigitem(
724 coreconfigitem(
725 b'email',
725 b'email',
726 b'from',
726 b'from',
727 default=None,
727 default=None,
728 )
728 )
729 coreconfigitem(
729 coreconfigitem(
730 b'email',
730 b'email',
731 b'method',
731 b'method',
732 default=b'smtp',
732 default=b'smtp',
733 )
733 )
734 coreconfigitem(
734 coreconfigitem(
735 b'email',
735 b'email',
736 b'reply-to',
736 b'reply-to',
737 default=None,
737 default=None,
738 )
738 )
739 coreconfigitem(
739 coreconfigitem(
740 b'email',
740 b'email',
741 b'to',
741 b'to',
742 default=None,
742 default=None,
743 )
743 )
744 coreconfigitem(
744 coreconfigitem(
745 b'experimental',
745 b'experimental',
746 b'archivemetatemplate',
746 b'archivemetatemplate',
747 default=dynamicdefault,
747 default=dynamicdefault,
748 )
748 )
749 coreconfigitem(
749 coreconfigitem(
750 b'experimental',
750 b'experimental',
751 b'auto-publish',
751 b'auto-publish',
752 default=b'publish',
752 default=b'publish',
753 )
753 )
754 coreconfigitem(
754 coreconfigitem(
755 b'experimental',
755 b'experimental',
756 b'bundle-phases',
756 b'bundle-phases',
757 default=False,
757 default=False,
758 )
758 )
759 coreconfigitem(
759 coreconfigitem(
760 b'experimental',
760 b'experimental',
761 b'bundle2-advertise',
761 b'bundle2-advertise',
762 default=True,
762 default=True,
763 )
763 )
764 coreconfigitem(
764 coreconfigitem(
765 b'experimental',
765 b'experimental',
766 b'bundle2-output-capture',
766 b'bundle2-output-capture',
767 default=False,
767 default=False,
768 )
768 )
769 coreconfigitem(
769 coreconfigitem(
770 b'experimental',
770 b'experimental',
771 b'bundle2.pushback',
771 b'bundle2.pushback',
772 default=False,
772 default=False,
773 )
773 )
774 coreconfigitem(
774 coreconfigitem(
775 b'experimental',
775 b'experimental',
776 b'bundle2lazylocking',
776 b'bundle2lazylocking',
777 default=False,
777 default=False,
778 )
778 )
779 coreconfigitem(
779 coreconfigitem(
780 b'experimental',
780 b'experimental',
781 b'bundlecomplevel',
781 b'bundlecomplevel',
782 default=None,
782 default=None,
783 )
783 )
784 coreconfigitem(
784 coreconfigitem(
785 b'experimental',
785 b'experimental',
786 b'bundlecomplevel.bzip2',
786 b'bundlecomplevel.bzip2',
787 default=None,
787 default=None,
788 )
788 )
789 coreconfigitem(
789 coreconfigitem(
790 b'experimental',
790 b'experimental',
791 b'bundlecomplevel.gzip',
791 b'bundlecomplevel.gzip',
792 default=None,
792 default=None,
793 )
793 )
794 coreconfigitem(
794 coreconfigitem(
795 b'experimental',
795 b'experimental',
796 b'bundlecomplevel.none',
796 b'bundlecomplevel.none',
797 default=None,
797 default=None,
798 )
798 )
799 coreconfigitem(
799 coreconfigitem(
800 b'experimental',
800 b'experimental',
801 b'bundlecomplevel.zstd',
801 b'bundlecomplevel.zstd',
802 default=None,
802 default=None,
803 )
803 )
804 coreconfigitem(
804 coreconfigitem(
805 b'experimental',
805 b'experimental',
806 b'changegroup3',
806 b'changegroup3',
807 default=False,
807 default=False,
808 )
808 )
809 coreconfigitem(
809 coreconfigitem(
810 b'experimental',
810 b'experimental',
811 b'cleanup-as-archived',
811 b'cleanup-as-archived',
812 default=False,
812 default=False,
813 )
813 )
814 coreconfigitem(
814 coreconfigitem(
815 b'experimental',
815 b'experimental',
816 b'clientcompressionengines',
816 b'clientcompressionengines',
817 default=list,
817 default=list,
818 )
818 )
819 coreconfigitem(
819 coreconfigitem(
820 b'experimental',
820 b'experimental',
821 b'copytrace',
821 b'copytrace',
822 default=b'on',
822 default=b'on',
823 )
823 )
824 coreconfigitem(
824 coreconfigitem(
825 b'experimental',
825 b'experimental',
826 b'copytrace.movecandidateslimit',
826 b'copytrace.movecandidateslimit',
827 default=100,
827 default=100,
828 )
828 )
829 coreconfigitem(
829 coreconfigitem(
830 b'experimental',
830 b'experimental',
831 b'copytrace.sourcecommitlimit',
831 b'copytrace.sourcecommitlimit',
832 default=100,
832 default=100,
833 )
833 )
834 coreconfigitem(
834 coreconfigitem(
835 b'experimental',
835 b'experimental',
836 b'copies.read-from',
836 b'copies.read-from',
837 default=b"filelog-only",
837 default=b"filelog-only",
838 )
838 )
839 coreconfigitem(
839 coreconfigitem(
840 b'experimental',
840 b'experimental',
841 b'copies.write-to',
841 b'copies.write-to',
842 default=b'filelog-only',
842 default=b'filelog-only',
843 )
843 )
844 coreconfigitem(
844 coreconfigitem(
845 b'experimental',
845 b'experimental',
846 b'crecordtest',
846 b'crecordtest',
847 default=None,
847 default=None,
848 )
848 )
849 coreconfigitem(
849 coreconfigitem(
850 b'experimental',
850 b'experimental',
851 b'directaccess',
851 b'directaccess',
852 default=False,
852 default=False,
853 )
853 )
854 coreconfigitem(
854 coreconfigitem(
855 b'experimental',
855 b'experimental',
856 b'directaccess.revnums',
856 b'directaccess.revnums',
857 default=False,
857 default=False,
858 )
858 )
859 coreconfigitem(
859 coreconfigitem(
860 b'experimental',
860 b'experimental',
861 b'editortmpinhg',
861 b'editortmpinhg',
862 default=False,
862 default=False,
863 )
863 )
864 coreconfigitem(
864 coreconfigitem(
865 b'experimental',
865 b'experimental',
866 b'evolution',
866 b'evolution',
867 default=list,
867 default=list,
868 )
868 )
869 coreconfigitem(
869 coreconfigitem(
870 b'experimental',
870 b'experimental',
871 b'evolution.allowdivergence',
871 b'evolution.allowdivergence',
872 default=False,
872 default=False,
873 alias=[(b'experimental', b'allowdivergence')],
873 alias=[(b'experimental', b'allowdivergence')],
874 )
874 )
875 coreconfigitem(
875 coreconfigitem(
876 b'experimental',
876 b'experimental',
877 b'evolution.allowunstable',
877 b'evolution.allowunstable',
878 default=None,
878 default=None,
879 )
879 )
880 coreconfigitem(
880 coreconfigitem(
881 b'experimental',
881 b'experimental',
882 b'evolution.createmarkers',
882 b'evolution.createmarkers',
883 default=None,
883 default=None,
884 )
884 )
885 coreconfigitem(
885 coreconfigitem(
886 b'experimental',
886 b'experimental',
887 b'evolution.effect-flags',
887 b'evolution.effect-flags',
888 default=True,
888 default=True,
889 alias=[(b'experimental', b'effect-flags')],
889 alias=[(b'experimental', b'effect-flags')],
890 )
890 )
891 coreconfigitem(
891 coreconfigitem(
892 b'experimental',
892 b'experimental',
893 b'evolution.exchange',
893 b'evolution.exchange',
894 default=None,
894 default=None,
895 )
895 )
896 coreconfigitem(
896 coreconfigitem(
897 b'experimental',
897 b'experimental',
898 b'evolution.bundle-obsmarker',
898 b'evolution.bundle-obsmarker',
899 default=False,
899 default=False,
900 )
900 )
901 coreconfigitem(
901 coreconfigitem(
902 b'experimental',
902 b'experimental',
903 b'evolution.bundle-obsmarker:mandatory',
903 b'evolution.bundle-obsmarker:mandatory',
904 default=True,
904 default=True,
905 )
905 )
906 coreconfigitem(
906 coreconfigitem(
907 b'experimental',
907 b'experimental',
908 b'log.topo',
908 b'log.topo',
909 default=False,
909 default=False,
910 )
910 )
911 coreconfigitem(
911 coreconfigitem(
912 b'experimental',
912 b'experimental',
913 b'evolution.report-instabilities',
913 b'evolution.report-instabilities',
914 default=True,
914 default=True,
915 )
915 )
916 coreconfigitem(
916 coreconfigitem(
917 b'experimental',
917 b'experimental',
918 b'evolution.track-operation',
918 b'evolution.track-operation',
919 default=True,
919 default=True,
920 )
920 )
921 # repo-level config to exclude a revset visibility
921 # repo-level config to exclude a revset visibility
922 #
922 #
923 # The target use case is to use `share` to expose different subset of the same
923 # The target use case is to use `share` to expose different subset of the same
924 # repository, especially server side. See also `server.view`.
924 # repository, especially server side. See also `server.view`.
925 coreconfigitem(
925 coreconfigitem(
926 b'experimental',
926 b'experimental',
927 b'extra-filter-revs',
927 b'extra-filter-revs',
928 default=None,
928 default=None,
929 )
929 )
930 coreconfigitem(
930 coreconfigitem(
931 b'experimental',
931 b'experimental',
932 b'maxdeltachainspan',
932 b'maxdeltachainspan',
933 default=-1,
933 default=-1,
934 )
934 )
935 # tracks files which were undeleted (merge might delete them but we explicitly
935 # tracks files which were undeleted (merge might delete them but we explicitly
936 # kept/undeleted them) and creates new filenodes for them
936 # kept/undeleted them) and creates new filenodes for them
937 coreconfigitem(
937 coreconfigitem(
938 b'experimental',
938 b'experimental',
939 b'merge-track-salvaged',
939 b'merge-track-salvaged',
940 default=False,
940 default=False,
941 )
941 )
942 coreconfigitem(
942 coreconfigitem(
943 b'experimental',
943 b'experimental',
944 b'mergetempdirprefix',
944 b'mergetempdirprefix',
945 default=None,
945 default=None,
946 )
946 )
947 coreconfigitem(
947 coreconfigitem(
948 b'experimental',
948 b'experimental',
949 b'mmapindexthreshold',
949 b'mmapindexthreshold',
950 default=None,
950 default=None,
951 )
951 )
952 coreconfigitem(
952 coreconfigitem(
953 b'experimental',
953 b'experimental',
954 b'narrow',
954 b'narrow',
955 default=False,
955 default=False,
956 )
956 )
957 coreconfigitem(
957 coreconfigitem(
958 b'experimental',
958 b'experimental',
959 b'nonnormalparanoidcheck',
959 b'nonnormalparanoidcheck',
960 default=False,
960 default=False,
961 )
961 )
962 coreconfigitem(
962 coreconfigitem(
963 b'experimental',
963 b'experimental',
964 b'exportableenviron',
964 b'exportableenviron',
965 default=list,
965 default=list,
966 )
966 )
967 coreconfigitem(
967 coreconfigitem(
968 b'experimental',
968 b'experimental',
969 b'extendedheader.index',
969 b'extendedheader.index',
970 default=None,
970 default=None,
971 )
971 )
972 coreconfigitem(
972 coreconfigitem(
973 b'experimental',
973 b'experimental',
974 b'extendedheader.similarity',
974 b'extendedheader.similarity',
975 default=False,
975 default=False,
976 )
976 )
977 coreconfigitem(
977 coreconfigitem(
978 b'experimental',
978 b'experimental',
979 b'graphshorten',
979 b'graphshorten',
980 default=False,
980 default=False,
981 )
981 )
982 coreconfigitem(
982 coreconfigitem(
983 b'experimental',
983 b'experimental',
984 b'graphstyle.parent',
984 b'graphstyle.parent',
985 default=dynamicdefault,
985 default=dynamicdefault,
986 )
986 )
987 coreconfigitem(
987 coreconfigitem(
988 b'experimental',
988 b'experimental',
989 b'graphstyle.missing',
989 b'graphstyle.missing',
990 default=dynamicdefault,
990 default=dynamicdefault,
991 )
991 )
992 coreconfigitem(
992 coreconfigitem(
993 b'experimental',
993 b'experimental',
994 b'graphstyle.grandparent',
994 b'graphstyle.grandparent',
995 default=dynamicdefault,
995 default=dynamicdefault,
996 )
996 )
997 coreconfigitem(
997 coreconfigitem(
998 b'experimental',
998 b'experimental',
999 b'hook-track-tags',
999 b'hook-track-tags',
1000 default=False,
1000 default=False,
1001 )
1001 )
1002 coreconfigitem(
1002 coreconfigitem(
1003 b'experimental',
1003 b'experimental',
1004 b'httppeer.advertise-v2',
1004 b'httppeer.advertise-v2',
1005 default=False,
1005 default=False,
1006 )
1006 )
1007 coreconfigitem(
1007 coreconfigitem(
1008 b'experimental',
1008 b'experimental',
1009 b'httppeer.v2-encoder-order',
1009 b'httppeer.v2-encoder-order',
1010 default=None,
1010 default=None,
1011 )
1011 )
1012 coreconfigitem(
1012 coreconfigitem(
1013 b'experimental',
1013 b'experimental',
1014 b'httppostargs',
1014 b'httppostargs',
1015 default=False,
1015 default=False,
1016 )
1016 )
1017 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1017 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1018 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1018 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1019
1019
1020 coreconfigitem(
1020 coreconfigitem(
1021 b'experimental',
1021 b'experimental',
1022 b'obsmarkers-exchange-debug',
1022 b'obsmarkers-exchange-debug',
1023 default=False,
1023 default=False,
1024 )
1024 )
1025 coreconfigitem(
1025 coreconfigitem(
1026 b'experimental',
1026 b'experimental',
1027 b'remotenames',
1027 b'remotenames',
1028 default=False,
1028 default=False,
1029 )
1029 )
1030 coreconfigitem(
1030 coreconfigitem(
1031 b'experimental',
1031 b'experimental',
1032 b'removeemptydirs',
1032 b'removeemptydirs',
1033 default=True,
1033 default=True,
1034 )
1034 )
1035 coreconfigitem(
1035 coreconfigitem(
1036 b'experimental',
1036 b'experimental',
1037 b'revert.interactive.select-to-keep',
1037 b'revert.interactive.select-to-keep',
1038 default=False,
1038 default=False,
1039 )
1039 )
1040 coreconfigitem(
1040 coreconfigitem(
1041 b'experimental',
1041 b'experimental',
1042 b'revisions.prefixhexnode',
1042 b'revisions.prefixhexnode',
1043 default=False,
1043 default=False,
1044 )
1044 )
1045 coreconfigitem(
1045 coreconfigitem(
1046 b'experimental',
1046 b'experimental',
1047 b'revlogv2',
1047 b'revlogv2',
1048 default=None,
1048 default=None,
1049 )
1049 )
1050 coreconfigitem(
1050 coreconfigitem(
1051 b'experimental',
1051 b'experimental',
1052 b'revisions.disambiguatewithin',
1052 b'revisions.disambiguatewithin',
1053 default=None,
1053 default=None,
1054 )
1054 )
1055 coreconfigitem(
1055 coreconfigitem(
1056 b'experimental',
1056 b'experimental',
1057 b'rust.index',
1057 b'rust.index',
1058 default=False,
1058 default=False,
1059 )
1059 )
1060 coreconfigitem(
1060 coreconfigitem(
1061 b'experimental',
1061 b'experimental',
1062 b'server.filesdata.recommended-batch-size',
1062 b'server.filesdata.recommended-batch-size',
1063 default=50000,
1063 default=50000,
1064 )
1064 )
1065 coreconfigitem(
1065 coreconfigitem(
1066 b'experimental',
1066 b'experimental',
1067 b'server.manifestdata.recommended-batch-size',
1067 b'server.manifestdata.recommended-batch-size',
1068 default=100000,
1068 default=100000,
1069 )
1069 )
1070 coreconfigitem(
1070 coreconfigitem(
1071 b'experimental',
1071 b'experimental',
1072 b'server.stream-narrow-clones',
1072 b'server.stream-narrow-clones',
1073 default=False,
1073 default=False,
1074 )
1074 )
1075 coreconfigitem(
1075 coreconfigitem(
1076 b'experimental',
1076 b'experimental',
1077 b'sharesafe-auto-downgrade-shares',
1078 default=False,
1079 )
1080 coreconfigitem(
1081 b'experimental',
1077 b'sharesafe-auto-upgrade-shares',
1082 b'sharesafe-auto-upgrade-shares',
1078 default=False,
1083 default=False,
1079 )
1084 )
1080 coreconfigitem(
1085 coreconfigitem(
1081 b'experimental',
1086 b'experimental',
1082 b'single-head-per-branch',
1087 b'single-head-per-branch',
1083 default=False,
1088 default=False,
1084 )
1089 )
1085 coreconfigitem(
1090 coreconfigitem(
1086 b'experimental',
1091 b'experimental',
1087 b'single-head-per-branch:account-closed-heads',
1092 b'single-head-per-branch:account-closed-heads',
1088 default=False,
1093 default=False,
1089 )
1094 )
1090 coreconfigitem(
1095 coreconfigitem(
1091 b'experimental',
1096 b'experimental',
1092 b'single-head-per-branch:public-changes-only',
1097 b'single-head-per-branch:public-changes-only',
1093 default=False,
1098 default=False,
1094 )
1099 )
1095 coreconfigitem(
1100 coreconfigitem(
1096 b'experimental',
1101 b'experimental',
1097 b'sshserver.support-v2',
1102 b'sshserver.support-v2',
1098 default=False,
1103 default=False,
1099 )
1104 )
1100 coreconfigitem(
1105 coreconfigitem(
1101 b'experimental',
1106 b'experimental',
1102 b'sparse-read',
1107 b'sparse-read',
1103 default=False,
1108 default=False,
1104 )
1109 )
1105 coreconfigitem(
1110 coreconfigitem(
1106 b'experimental',
1111 b'experimental',
1107 b'sparse-read.density-threshold',
1112 b'sparse-read.density-threshold',
1108 default=0.50,
1113 default=0.50,
1109 )
1114 )
1110 coreconfigitem(
1115 coreconfigitem(
1111 b'experimental',
1116 b'experimental',
1112 b'sparse-read.min-gap-size',
1117 b'sparse-read.min-gap-size',
1113 default=b'65K',
1118 default=b'65K',
1114 )
1119 )
1115 coreconfigitem(
1120 coreconfigitem(
1116 b'experimental',
1121 b'experimental',
1117 b'treemanifest',
1122 b'treemanifest',
1118 default=False,
1123 default=False,
1119 )
1124 )
1120 coreconfigitem(
1125 coreconfigitem(
1121 b'experimental',
1126 b'experimental',
1122 b'update.atomic-file',
1127 b'update.atomic-file',
1123 default=False,
1128 default=False,
1124 )
1129 )
1125 coreconfigitem(
1130 coreconfigitem(
1126 b'experimental',
1131 b'experimental',
1127 b'sshpeer.advertise-v2',
1132 b'sshpeer.advertise-v2',
1128 default=False,
1133 default=False,
1129 )
1134 )
1130 coreconfigitem(
1135 coreconfigitem(
1131 b'experimental',
1136 b'experimental',
1132 b'web.apiserver',
1137 b'web.apiserver',
1133 default=False,
1138 default=False,
1134 )
1139 )
1135 coreconfigitem(
1140 coreconfigitem(
1136 b'experimental',
1141 b'experimental',
1137 b'web.api.http-v2',
1142 b'web.api.http-v2',
1138 default=False,
1143 default=False,
1139 )
1144 )
1140 coreconfigitem(
1145 coreconfigitem(
1141 b'experimental',
1146 b'experimental',
1142 b'web.api.debugreflect',
1147 b'web.api.debugreflect',
1143 default=False,
1148 default=False,
1144 )
1149 )
1145 coreconfigitem(
1150 coreconfigitem(
1146 b'experimental',
1151 b'experimental',
1147 b'worker.wdir-get-thread-safe',
1152 b'worker.wdir-get-thread-safe',
1148 default=False,
1153 default=False,
1149 )
1154 )
1150 coreconfigitem(
1155 coreconfigitem(
1151 b'experimental',
1156 b'experimental',
1152 b'worker.repository-upgrade',
1157 b'worker.repository-upgrade',
1153 default=False,
1158 default=False,
1154 )
1159 )
1155 coreconfigitem(
1160 coreconfigitem(
1156 b'experimental',
1161 b'experimental',
1157 b'xdiff',
1162 b'xdiff',
1158 default=False,
1163 default=False,
1159 )
1164 )
1160 coreconfigitem(
1165 coreconfigitem(
1161 b'extensions',
1166 b'extensions',
1162 b'.*',
1167 b'.*',
1163 default=None,
1168 default=None,
1164 generic=True,
1169 generic=True,
1165 )
1170 )
1166 coreconfigitem(
1171 coreconfigitem(
1167 b'extdata',
1172 b'extdata',
1168 b'.*',
1173 b'.*',
1169 default=None,
1174 default=None,
1170 generic=True,
1175 generic=True,
1171 )
1176 )
1172 coreconfigitem(
1177 coreconfigitem(
1173 b'format',
1178 b'format',
1174 b'bookmarks-in-store',
1179 b'bookmarks-in-store',
1175 default=False,
1180 default=False,
1176 )
1181 )
1177 coreconfigitem(
1182 coreconfigitem(
1178 b'format',
1183 b'format',
1179 b'chunkcachesize',
1184 b'chunkcachesize',
1180 default=None,
1185 default=None,
1181 experimental=True,
1186 experimental=True,
1182 )
1187 )
1183 coreconfigitem(
1188 coreconfigitem(
1184 b'format',
1189 b'format',
1185 b'dotencode',
1190 b'dotencode',
1186 default=True,
1191 default=True,
1187 )
1192 )
1188 coreconfigitem(
1193 coreconfigitem(
1189 b'format',
1194 b'format',
1190 b'generaldelta',
1195 b'generaldelta',
1191 default=False,
1196 default=False,
1192 experimental=True,
1197 experimental=True,
1193 )
1198 )
1194 coreconfigitem(
1199 coreconfigitem(
1195 b'format',
1200 b'format',
1196 b'manifestcachesize',
1201 b'manifestcachesize',
1197 default=None,
1202 default=None,
1198 experimental=True,
1203 experimental=True,
1199 )
1204 )
1200 coreconfigitem(
1205 coreconfigitem(
1201 b'format',
1206 b'format',
1202 b'maxchainlen',
1207 b'maxchainlen',
1203 default=dynamicdefault,
1208 default=dynamicdefault,
1204 experimental=True,
1209 experimental=True,
1205 )
1210 )
1206 coreconfigitem(
1211 coreconfigitem(
1207 b'format',
1212 b'format',
1208 b'obsstore-version',
1213 b'obsstore-version',
1209 default=None,
1214 default=None,
1210 )
1215 )
1211 coreconfigitem(
1216 coreconfigitem(
1212 b'format',
1217 b'format',
1213 b'sparse-revlog',
1218 b'sparse-revlog',
1214 default=True,
1219 default=True,
1215 )
1220 )
1216 coreconfigitem(
1221 coreconfigitem(
1217 b'format',
1222 b'format',
1218 b'revlog-compression',
1223 b'revlog-compression',
1219 default=lambda: [b'zlib'],
1224 default=lambda: [b'zlib'],
1220 alias=[(b'experimental', b'format.compression')],
1225 alias=[(b'experimental', b'format.compression')],
1221 )
1226 )
1222 coreconfigitem(
1227 coreconfigitem(
1223 b'format',
1228 b'format',
1224 b'usefncache',
1229 b'usefncache',
1225 default=True,
1230 default=True,
1226 )
1231 )
1227 coreconfigitem(
1232 coreconfigitem(
1228 b'format',
1233 b'format',
1229 b'usegeneraldelta',
1234 b'usegeneraldelta',
1230 default=True,
1235 default=True,
1231 )
1236 )
1232 coreconfigitem(
1237 coreconfigitem(
1233 b'format',
1238 b'format',
1234 b'usestore',
1239 b'usestore',
1235 default=True,
1240 default=True,
1236 )
1241 )
1237 # Right now, the only efficient implement of the nodemap logic is in Rust, so
1242 # Right now, the only efficient implement of the nodemap logic is in Rust, so
1238 # the persistent nodemap feature needs to stay experimental as long as the Rust
1243 # the persistent nodemap feature needs to stay experimental as long as the Rust
1239 # extensions are an experimental feature.
1244 # extensions are an experimental feature.
1240 coreconfigitem(
1245 coreconfigitem(
1241 b'format', b'use-persistent-nodemap', default=False, experimental=True
1246 b'format', b'use-persistent-nodemap', default=False, experimental=True
1242 )
1247 )
1243 coreconfigitem(
1248 coreconfigitem(
1244 b'format',
1249 b'format',
1245 b'exp-use-copies-side-data-changeset',
1250 b'exp-use-copies-side-data-changeset',
1246 default=False,
1251 default=False,
1247 experimental=True,
1252 experimental=True,
1248 )
1253 )
1249 coreconfigitem(
1254 coreconfigitem(
1250 b'format',
1255 b'format',
1251 b'exp-use-side-data',
1256 b'exp-use-side-data',
1252 default=False,
1257 default=False,
1253 experimental=True,
1258 experimental=True,
1254 )
1259 )
1255 coreconfigitem(
1260 coreconfigitem(
1256 b'format',
1261 b'format',
1257 b'exp-share-safe',
1262 b'exp-share-safe',
1258 default=False,
1263 default=False,
1259 experimental=True,
1264 experimental=True,
1260 )
1265 )
1261 coreconfigitem(
1266 coreconfigitem(
1262 b'format',
1267 b'format',
1263 b'internal-phase',
1268 b'internal-phase',
1264 default=False,
1269 default=False,
1265 experimental=True,
1270 experimental=True,
1266 )
1271 )
1267 coreconfigitem(
1272 coreconfigitem(
1268 b'fsmonitor',
1273 b'fsmonitor',
1269 b'warn_when_unused',
1274 b'warn_when_unused',
1270 default=True,
1275 default=True,
1271 )
1276 )
1272 coreconfigitem(
1277 coreconfigitem(
1273 b'fsmonitor',
1278 b'fsmonitor',
1274 b'warn_update_file_count',
1279 b'warn_update_file_count',
1275 default=50000,
1280 default=50000,
1276 )
1281 )
1277 coreconfigitem(
1282 coreconfigitem(
1278 b'fsmonitor',
1283 b'fsmonitor',
1279 b'warn_update_file_count_rust',
1284 b'warn_update_file_count_rust',
1280 default=400000,
1285 default=400000,
1281 )
1286 )
1282 coreconfigitem(
1287 coreconfigitem(
1283 b'help',
1288 b'help',
1284 br'hidden-command\..*',
1289 br'hidden-command\..*',
1285 default=False,
1290 default=False,
1286 generic=True,
1291 generic=True,
1287 )
1292 )
1288 coreconfigitem(
1293 coreconfigitem(
1289 b'help',
1294 b'help',
1290 br'hidden-topic\..*',
1295 br'hidden-topic\..*',
1291 default=False,
1296 default=False,
1292 generic=True,
1297 generic=True,
1293 )
1298 )
1294 coreconfigitem(
1299 coreconfigitem(
1295 b'hooks',
1300 b'hooks',
1296 b'.*',
1301 b'.*',
1297 default=dynamicdefault,
1302 default=dynamicdefault,
1298 generic=True,
1303 generic=True,
1299 )
1304 )
1300 coreconfigitem(
1305 coreconfigitem(
1301 b'hgweb-paths',
1306 b'hgweb-paths',
1302 b'.*',
1307 b'.*',
1303 default=list,
1308 default=list,
1304 generic=True,
1309 generic=True,
1305 )
1310 )
1306 coreconfigitem(
1311 coreconfigitem(
1307 b'hostfingerprints',
1312 b'hostfingerprints',
1308 b'.*',
1313 b'.*',
1309 default=list,
1314 default=list,
1310 generic=True,
1315 generic=True,
1311 )
1316 )
1312 coreconfigitem(
1317 coreconfigitem(
1313 b'hostsecurity',
1318 b'hostsecurity',
1314 b'ciphers',
1319 b'ciphers',
1315 default=None,
1320 default=None,
1316 )
1321 )
1317 coreconfigitem(
1322 coreconfigitem(
1318 b'hostsecurity',
1323 b'hostsecurity',
1319 b'minimumprotocol',
1324 b'minimumprotocol',
1320 default=dynamicdefault,
1325 default=dynamicdefault,
1321 )
1326 )
1322 coreconfigitem(
1327 coreconfigitem(
1323 b'hostsecurity',
1328 b'hostsecurity',
1324 b'.*:minimumprotocol$',
1329 b'.*:minimumprotocol$',
1325 default=dynamicdefault,
1330 default=dynamicdefault,
1326 generic=True,
1331 generic=True,
1327 )
1332 )
1328 coreconfigitem(
1333 coreconfigitem(
1329 b'hostsecurity',
1334 b'hostsecurity',
1330 b'.*:ciphers$',
1335 b'.*:ciphers$',
1331 default=dynamicdefault,
1336 default=dynamicdefault,
1332 generic=True,
1337 generic=True,
1333 )
1338 )
1334 coreconfigitem(
1339 coreconfigitem(
1335 b'hostsecurity',
1340 b'hostsecurity',
1336 b'.*:fingerprints$',
1341 b'.*:fingerprints$',
1337 default=list,
1342 default=list,
1338 generic=True,
1343 generic=True,
1339 )
1344 )
1340 coreconfigitem(
1345 coreconfigitem(
1341 b'hostsecurity',
1346 b'hostsecurity',
1342 b'.*:verifycertsfile$',
1347 b'.*:verifycertsfile$',
1343 default=None,
1348 default=None,
1344 generic=True,
1349 generic=True,
1345 )
1350 )
1346
1351
1347 coreconfigitem(
1352 coreconfigitem(
1348 b'http_proxy',
1353 b'http_proxy',
1349 b'always',
1354 b'always',
1350 default=False,
1355 default=False,
1351 )
1356 )
1352 coreconfigitem(
1357 coreconfigitem(
1353 b'http_proxy',
1358 b'http_proxy',
1354 b'host',
1359 b'host',
1355 default=None,
1360 default=None,
1356 )
1361 )
1357 coreconfigitem(
1362 coreconfigitem(
1358 b'http_proxy',
1363 b'http_proxy',
1359 b'no',
1364 b'no',
1360 default=list,
1365 default=list,
1361 )
1366 )
1362 coreconfigitem(
1367 coreconfigitem(
1363 b'http_proxy',
1368 b'http_proxy',
1364 b'passwd',
1369 b'passwd',
1365 default=None,
1370 default=None,
1366 )
1371 )
1367 coreconfigitem(
1372 coreconfigitem(
1368 b'http_proxy',
1373 b'http_proxy',
1369 b'user',
1374 b'user',
1370 default=None,
1375 default=None,
1371 )
1376 )
1372
1377
1373 coreconfigitem(
1378 coreconfigitem(
1374 b'http',
1379 b'http',
1375 b'timeout',
1380 b'timeout',
1376 default=None,
1381 default=None,
1377 )
1382 )
1378
1383
1379 coreconfigitem(
1384 coreconfigitem(
1380 b'logtoprocess',
1385 b'logtoprocess',
1381 b'commandexception',
1386 b'commandexception',
1382 default=None,
1387 default=None,
1383 )
1388 )
1384 coreconfigitem(
1389 coreconfigitem(
1385 b'logtoprocess',
1390 b'logtoprocess',
1386 b'commandfinish',
1391 b'commandfinish',
1387 default=None,
1392 default=None,
1388 )
1393 )
1389 coreconfigitem(
1394 coreconfigitem(
1390 b'logtoprocess',
1395 b'logtoprocess',
1391 b'command',
1396 b'command',
1392 default=None,
1397 default=None,
1393 )
1398 )
1394 coreconfigitem(
1399 coreconfigitem(
1395 b'logtoprocess',
1400 b'logtoprocess',
1396 b'develwarn',
1401 b'develwarn',
1397 default=None,
1402 default=None,
1398 )
1403 )
1399 coreconfigitem(
1404 coreconfigitem(
1400 b'logtoprocess',
1405 b'logtoprocess',
1401 b'uiblocked',
1406 b'uiblocked',
1402 default=None,
1407 default=None,
1403 )
1408 )
1404 coreconfigitem(
1409 coreconfigitem(
1405 b'merge',
1410 b'merge',
1406 b'checkunknown',
1411 b'checkunknown',
1407 default=b'abort',
1412 default=b'abort',
1408 )
1413 )
1409 coreconfigitem(
1414 coreconfigitem(
1410 b'merge',
1415 b'merge',
1411 b'checkignored',
1416 b'checkignored',
1412 default=b'abort',
1417 default=b'abort',
1413 )
1418 )
1414 coreconfigitem(
1419 coreconfigitem(
1415 b'experimental',
1420 b'experimental',
1416 b'merge.checkpathconflicts',
1421 b'merge.checkpathconflicts',
1417 default=False,
1422 default=False,
1418 )
1423 )
1419 coreconfigitem(
1424 coreconfigitem(
1420 b'merge',
1425 b'merge',
1421 b'followcopies',
1426 b'followcopies',
1422 default=True,
1427 default=True,
1423 )
1428 )
1424 coreconfigitem(
1429 coreconfigitem(
1425 b'merge',
1430 b'merge',
1426 b'on-failure',
1431 b'on-failure',
1427 default=b'continue',
1432 default=b'continue',
1428 )
1433 )
1429 coreconfigitem(
1434 coreconfigitem(
1430 b'merge',
1435 b'merge',
1431 b'preferancestor',
1436 b'preferancestor',
1432 default=lambda: [b'*'],
1437 default=lambda: [b'*'],
1433 experimental=True,
1438 experimental=True,
1434 )
1439 )
1435 coreconfigitem(
1440 coreconfigitem(
1436 b'merge',
1441 b'merge',
1437 b'strict-capability-check',
1442 b'strict-capability-check',
1438 default=False,
1443 default=False,
1439 )
1444 )
1440 coreconfigitem(
1445 coreconfigitem(
1441 b'merge-tools',
1446 b'merge-tools',
1442 b'.*',
1447 b'.*',
1443 default=None,
1448 default=None,
1444 generic=True,
1449 generic=True,
1445 )
1450 )
1446 coreconfigitem(
1451 coreconfigitem(
1447 b'merge-tools',
1452 b'merge-tools',
1448 br'.*\.args$',
1453 br'.*\.args$',
1449 default=b"$local $base $other",
1454 default=b"$local $base $other",
1450 generic=True,
1455 generic=True,
1451 priority=-1,
1456 priority=-1,
1452 )
1457 )
1453 coreconfigitem(
1458 coreconfigitem(
1454 b'merge-tools',
1459 b'merge-tools',
1455 br'.*\.binary$',
1460 br'.*\.binary$',
1456 default=False,
1461 default=False,
1457 generic=True,
1462 generic=True,
1458 priority=-1,
1463 priority=-1,
1459 )
1464 )
1460 coreconfigitem(
1465 coreconfigitem(
1461 b'merge-tools',
1466 b'merge-tools',
1462 br'.*\.check$',
1467 br'.*\.check$',
1463 default=list,
1468 default=list,
1464 generic=True,
1469 generic=True,
1465 priority=-1,
1470 priority=-1,
1466 )
1471 )
1467 coreconfigitem(
1472 coreconfigitem(
1468 b'merge-tools',
1473 b'merge-tools',
1469 br'.*\.checkchanged$',
1474 br'.*\.checkchanged$',
1470 default=False,
1475 default=False,
1471 generic=True,
1476 generic=True,
1472 priority=-1,
1477 priority=-1,
1473 )
1478 )
1474 coreconfigitem(
1479 coreconfigitem(
1475 b'merge-tools',
1480 b'merge-tools',
1476 br'.*\.executable$',
1481 br'.*\.executable$',
1477 default=dynamicdefault,
1482 default=dynamicdefault,
1478 generic=True,
1483 generic=True,
1479 priority=-1,
1484 priority=-1,
1480 )
1485 )
1481 coreconfigitem(
1486 coreconfigitem(
1482 b'merge-tools',
1487 b'merge-tools',
1483 br'.*\.fixeol$',
1488 br'.*\.fixeol$',
1484 default=False,
1489 default=False,
1485 generic=True,
1490 generic=True,
1486 priority=-1,
1491 priority=-1,
1487 )
1492 )
1488 coreconfigitem(
1493 coreconfigitem(
1489 b'merge-tools',
1494 b'merge-tools',
1490 br'.*\.gui$',
1495 br'.*\.gui$',
1491 default=False,
1496 default=False,
1492 generic=True,
1497 generic=True,
1493 priority=-1,
1498 priority=-1,
1494 )
1499 )
1495 coreconfigitem(
1500 coreconfigitem(
1496 b'merge-tools',
1501 b'merge-tools',
1497 br'.*\.mergemarkers$',
1502 br'.*\.mergemarkers$',
1498 default=b'basic',
1503 default=b'basic',
1499 generic=True,
1504 generic=True,
1500 priority=-1,
1505 priority=-1,
1501 )
1506 )
1502 coreconfigitem(
1507 coreconfigitem(
1503 b'merge-tools',
1508 b'merge-tools',
1504 br'.*\.mergemarkertemplate$',
1509 br'.*\.mergemarkertemplate$',
1505 default=dynamicdefault, # take from command-templates.mergemarker
1510 default=dynamicdefault, # take from command-templates.mergemarker
1506 generic=True,
1511 generic=True,
1507 priority=-1,
1512 priority=-1,
1508 )
1513 )
1509 coreconfigitem(
1514 coreconfigitem(
1510 b'merge-tools',
1515 b'merge-tools',
1511 br'.*\.priority$',
1516 br'.*\.priority$',
1512 default=0,
1517 default=0,
1513 generic=True,
1518 generic=True,
1514 priority=-1,
1519 priority=-1,
1515 )
1520 )
1516 coreconfigitem(
1521 coreconfigitem(
1517 b'merge-tools',
1522 b'merge-tools',
1518 br'.*\.premerge$',
1523 br'.*\.premerge$',
1519 default=dynamicdefault,
1524 default=dynamicdefault,
1520 generic=True,
1525 generic=True,
1521 priority=-1,
1526 priority=-1,
1522 )
1527 )
1523 coreconfigitem(
1528 coreconfigitem(
1524 b'merge-tools',
1529 b'merge-tools',
1525 br'.*\.symlink$',
1530 br'.*\.symlink$',
1526 default=False,
1531 default=False,
1527 generic=True,
1532 generic=True,
1528 priority=-1,
1533 priority=-1,
1529 )
1534 )
1530 coreconfigitem(
1535 coreconfigitem(
1531 b'pager',
1536 b'pager',
1532 b'attend-.*',
1537 b'attend-.*',
1533 default=dynamicdefault,
1538 default=dynamicdefault,
1534 generic=True,
1539 generic=True,
1535 )
1540 )
1536 coreconfigitem(
1541 coreconfigitem(
1537 b'pager',
1542 b'pager',
1538 b'ignore',
1543 b'ignore',
1539 default=list,
1544 default=list,
1540 )
1545 )
1541 coreconfigitem(
1546 coreconfigitem(
1542 b'pager',
1547 b'pager',
1543 b'pager',
1548 b'pager',
1544 default=dynamicdefault,
1549 default=dynamicdefault,
1545 )
1550 )
1546 coreconfigitem(
1551 coreconfigitem(
1547 b'patch',
1552 b'patch',
1548 b'eol',
1553 b'eol',
1549 default=b'strict',
1554 default=b'strict',
1550 )
1555 )
1551 coreconfigitem(
1556 coreconfigitem(
1552 b'patch',
1557 b'patch',
1553 b'fuzz',
1558 b'fuzz',
1554 default=2,
1559 default=2,
1555 )
1560 )
1556 coreconfigitem(
1561 coreconfigitem(
1557 b'paths',
1562 b'paths',
1558 b'default',
1563 b'default',
1559 default=None,
1564 default=None,
1560 )
1565 )
1561 coreconfigitem(
1566 coreconfigitem(
1562 b'paths',
1567 b'paths',
1563 b'default-push',
1568 b'default-push',
1564 default=None,
1569 default=None,
1565 )
1570 )
1566 coreconfigitem(
1571 coreconfigitem(
1567 b'paths',
1572 b'paths',
1568 b'.*',
1573 b'.*',
1569 default=None,
1574 default=None,
1570 generic=True,
1575 generic=True,
1571 )
1576 )
1572 coreconfigitem(
1577 coreconfigitem(
1573 b'phases',
1578 b'phases',
1574 b'checksubrepos',
1579 b'checksubrepos',
1575 default=b'follow',
1580 default=b'follow',
1576 )
1581 )
1577 coreconfigitem(
1582 coreconfigitem(
1578 b'phases',
1583 b'phases',
1579 b'new-commit',
1584 b'new-commit',
1580 default=b'draft',
1585 default=b'draft',
1581 )
1586 )
1582 coreconfigitem(
1587 coreconfigitem(
1583 b'phases',
1588 b'phases',
1584 b'publish',
1589 b'publish',
1585 default=True,
1590 default=True,
1586 )
1591 )
1587 coreconfigitem(
1592 coreconfigitem(
1588 b'profiling',
1593 b'profiling',
1589 b'enabled',
1594 b'enabled',
1590 default=False,
1595 default=False,
1591 )
1596 )
1592 coreconfigitem(
1597 coreconfigitem(
1593 b'profiling',
1598 b'profiling',
1594 b'format',
1599 b'format',
1595 default=b'text',
1600 default=b'text',
1596 )
1601 )
1597 coreconfigitem(
1602 coreconfigitem(
1598 b'profiling',
1603 b'profiling',
1599 b'freq',
1604 b'freq',
1600 default=1000,
1605 default=1000,
1601 )
1606 )
1602 coreconfigitem(
1607 coreconfigitem(
1603 b'profiling',
1608 b'profiling',
1604 b'limit',
1609 b'limit',
1605 default=30,
1610 default=30,
1606 )
1611 )
1607 coreconfigitem(
1612 coreconfigitem(
1608 b'profiling',
1613 b'profiling',
1609 b'nested',
1614 b'nested',
1610 default=0,
1615 default=0,
1611 )
1616 )
1612 coreconfigitem(
1617 coreconfigitem(
1613 b'profiling',
1618 b'profiling',
1614 b'output',
1619 b'output',
1615 default=None,
1620 default=None,
1616 )
1621 )
1617 coreconfigitem(
1622 coreconfigitem(
1618 b'profiling',
1623 b'profiling',
1619 b'showmax',
1624 b'showmax',
1620 default=0.999,
1625 default=0.999,
1621 )
1626 )
1622 coreconfigitem(
1627 coreconfigitem(
1623 b'profiling',
1628 b'profiling',
1624 b'showmin',
1629 b'showmin',
1625 default=dynamicdefault,
1630 default=dynamicdefault,
1626 )
1631 )
1627 coreconfigitem(
1632 coreconfigitem(
1628 b'profiling',
1633 b'profiling',
1629 b'showtime',
1634 b'showtime',
1630 default=True,
1635 default=True,
1631 )
1636 )
1632 coreconfigitem(
1637 coreconfigitem(
1633 b'profiling',
1638 b'profiling',
1634 b'sort',
1639 b'sort',
1635 default=b'inlinetime',
1640 default=b'inlinetime',
1636 )
1641 )
1637 coreconfigitem(
1642 coreconfigitem(
1638 b'profiling',
1643 b'profiling',
1639 b'statformat',
1644 b'statformat',
1640 default=b'hotpath',
1645 default=b'hotpath',
1641 )
1646 )
1642 coreconfigitem(
1647 coreconfigitem(
1643 b'profiling',
1648 b'profiling',
1644 b'time-track',
1649 b'time-track',
1645 default=dynamicdefault,
1650 default=dynamicdefault,
1646 )
1651 )
1647 coreconfigitem(
1652 coreconfigitem(
1648 b'profiling',
1653 b'profiling',
1649 b'type',
1654 b'type',
1650 default=b'stat',
1655 default=b'stat',
1651 )
1656 )
1652 coreconfigitem(
1657 coreconfigitem(
1653 b'progress',
1658 b'progress',
1654 b'assume-tty',
1659 b'assume-tty',
1655 default=False,
1660 default=False,
1656 )
1661 )
1657 coreconfigitem(
1662 coreconfigitem(
1658 b'progress',
1663 b'progress',
1659 b'changedelay',
1664 b'changedelay',
1660 default=1,
1665 default=1,
1661 )
1666 )
1662 coreconfigitem(
1667 coreconfigitem(
1663 b'progress',
1668 b'progress',
1664 b'clear-complete',
1669 b'clear-complete',
1665 default=True,
1670 default=True,
1666 )
1671 )
1667 coreconfigitem(
1672 coreconfigitem(
1668 b'progress',
1673 b'progress',
1669 b'debug',
1674 b'debug',
1670 default=False,
1675 default=False,
1671 )
1676 )
1672 coreconfigitem(
1677 coreconfigitem(
1673 b'progress',
1678 b'progress',
1674 b'delay',
1679 b'delay',
1675 default=3,
1680 default=3,
1676 )
1681 )
1677 coreconfigitem(
1682 coreconfigitem(
1678 b'progress',
1683 b'progress',
1679 b'disable',
1684 b'disable',
1680 default=False,
1685 default=False,
1681 )
1686 )
1682 coreconfigitem(
1687 coreconfigitem(
1683 b'progress',
1688 b'progress',
1684 b'estimateinterval',
1689 b'estimateinterval',
1685 default=60.0,
1690 default=60.0,
1686 )
1691 )
1687 coreconfigitem(
1692 coreconfigitem(
1688 b'progress',
1693 b'progress',
1689 b'format',
1694 b'format',
1690 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1695 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1691 )
1696 )
1692 coreconfigitem(
1697 coreconfigitem(
1693 b'progress',
1698 b'progress',
1694 b'refresh',
1699 b'refresh',
1695 default=0.1,
1700 default=0.1,
1696 )
1701 )
1697 coreconfigitem(
1702 coreconfigitem(
1698 b'progress',
1703 b'progress',
1699 b'width',
1704 b'width',
1700 default=dynamicdefault,
1705 default=dynamicdefault,
1701 )
1706 )
1702 coreconfigitem(
1707 coreconfigitem(
1703 b'pull',
1708 b'pull',
1704 b'confirm',
1709 b'confirm',
1705 default=False,
1710 default=False,
1706 )
1711 )
1707 coreconfigitem(
1712 coreconfigitem(
1708 b'push',
1713 b'push',
1709 b'pushvars.server',
1714 b'pushvars.server',
1710 default=False,
1715 default=False,
1711 )
1716 )
1712 coreconfigitem(
1717 coreconfigitem(
1713 b'rewrite',
1718 b'rewrite',
1714 b'backup-bundle',
1719 b'backup-bundle',
1715 default=True,
1720 default=True,
1716 alias=[(b'ui', b'history-editing-backup')],
1721 alias=[(b'ui', b'history-editing-backup')],
1717 )
1722 )
1718 coreconfigitem(
1723 coreconfigitem(
1719 b'rewrite',
1724 b'rewrite',
1720 b'update-timestamp',
1725 b'update-timestamp',
1721 default=False,
1726 default=False,
1722 )
1727 )
1723 coreconfigitem(
1728 coreconfigitem(
1724 b'rewrite',
1729 b'rewrite',
1725 b'empty-successor',
1730 b'empty-successor',
1726 default=b'skip',
1731 default=b'skip',
1727 experimental=True,
1732 experimental=True,
1728 )
1733 )
1729 coreconfigitem(
1734 coreconfigitem(
1730 b'storage',
1735 b'storage',
1731 b'new-repo-backend',
1736 b'new-repo-backend',
1732 default=b'revlogv1',
1737 default=b'revlogv1',
1733 experimental=True,
1738 experimental=True,
1734 )
1739 )
1735 coreconfigitem(
1740 coreconfigitem(
1736 b'storage',
1741 b'storage',
1737 b'revlog.optimize-delta-parent-choice',
1742 b'revlog.optimize-delta-parent-choice',
1738 default=True,
1743 default=True,
1739 alias=[(b'format', b'aggressivemergedeltas')],
1744 alias=[(b'format', b'aggressivemergedeltas')],
1740 )
1745 )
1741 # experimental as long as rust is experimental (or a C version is implemented)
1746 # experimental as long as rust is experimental (or a C version is implemented)
1742 coreconfigitem(
1747 coreconfigitem(
1743 b'storage', b'revlog.nodemap.mmap', default=True, experimental=True
1748 b'storage', b'revlog.nodemap.mmap', default=True, experimental=True
1744 )
1749 )
1745 # experimental as long as format.use-persistent-nodemap is.
1750 # experimental as long as format.use-persistent-nodemap is.
1746 coreconfigitem(
1751 coreconfigitem(
1747 b'storage', b'revlog.nodemap.mode', default=b'compat', experimental=True
1752 b'storage', b'revlog.nodemap.mode', default=b'compat', experimental=True
1748 )
1753 )
1749 coreconfigitem(
1754 coreconfigitem(
1750 b'storage',
1755 b'storage',
1751 b'revlog.reuse-external-delta',
1756 b'revlog.reuse-external-delta',
1752 default=True,
1757 default=True,
1753 )
1758 )
1754 coreconfigitem(
1759 coreconfigitem(
1755 b'storage',
1760 b'storage',
1756 b'revlog.reuse-external-delta-parent',
1761 b'revlog.reuse-external-delta-parent',
1757 default=None,
1762 default=None,
1758 )
1763 )
1759 coreconfigitem(
1764 coreconfigitem(
1760 b'storage',
1765 b'storage',
1761 b'revlog.zlib.level',
1766 b'revlog.zlib.level',
1762 default=None,
1767 default=None,
1763 )
1768 )
1764 coreconfigitem(
1769 coreconfigitem(
1765 b'storage',
1770 b'storage',
1766 b'revlog.zstd.level',
1771 b'revlog.zstd.level',
1767 default=None,
1772 default=None,
1768 )
1773 )
1769 coreconfigitem(
1774 coreconfigitem(
1770 b'server',
1775 b'server',
1771 b'bookmarks-pushkey-compat',
1776 b'bookmarks-pushkey-compat',
1772 default=True,
1777 default=True,
1773 )
1778 )
1774 coreconfigitem(
1779 coreconfigitem(
1775 b'server',
1780 b'server',
1776 b'bundle1',
1781 b'bundle1',
1777 default=True,
1782 default=True,
1778 )
1783 )
1779 coreconfigitem(
1784 coreconfigitem(
1780 b'server',
1785 b'server',
1781 b'bundle1gd',
1786 b'bundle1gd',
1782 default=None,
1787 default=None,
1783 )
1788 )
1784 coreconfigitem(
1789 coreconfigitem(
1785 b'server',
1790 b'server',
1786 b'bundle1.pull',
1791 b'bundle1.pull',
1787 default=None,
1792 default=None,
1788 )
1793 )
1789 coreconfigitem(
1794 coreconfigitem(
1790 b'server',
1795 b'server',
1791 b'bundle1gd.pull',
1796 b'bundle1gd.pull',
1792 default=None,
1797 default=None,
1793 )
1798 )
1794 coreconfigitem(
1799 coreconfigitem(
1795 b'server',
1800 b'server',
1796 b'bundle1.push',
1801 b'bundle1.push',
1797 default=None,
1802 default=None,
1798 )
1803 )
1799 coreconfigitem(
1804 coreconfigitem(
1800 b'server',
1805 b'server',
1801 b'bundle1gd.push',
1806 b'bundle1gd.push',
1802 default=None,
1807 default=None,
1803 )
1808 )
1804 coreconfigitem(
1809 coreconfigitem(
1805 b'server',
1810 b'server',
1806 b'bundle2.stream',
1811 b'bundle2.stream',
1807 default=True,
1812 default=True,
1808 alias=[(b'experimental', b'bundle2.stream')],
1813 alias=[(b'experimental', b'bundle2.stream')],
1809 )
1814 )
1810 coreconfigitem(
1815 coreconfigitem(
1811 b'server',
1816 b'server',
1812 b'compressionengines',
1817 b'compressionengines',
1813 default=list,
1818 default=list,
1814 )
1819 )
1815 coreconfigitem(
1820 coreconfigitem(
1816 b'server',
1821 b'server',
1817 b'concurrent-push-mode',
1822 b'concurrent-push-mode',
1818 default=b'check-related',
1823 default=b'check-related',
1819 )
1824 )
1820 coreconfigitem(
1825 coreconfigitem(
1821 b'server',
1826 b'server',
1822 b'disablefullbundle',
1827 b'disablefullbundle',
1823 default=False,
1828 default=False,
1824 )
1829 )
1825 coreconfigitem(
1830 coreconfigitem(
1826 b'server',
1831 b'server',
1827 b'maxhttpheaderlen',
1832 b'maxhttpheaderlen',
1828 default=1024,
1833 default=1024,
1829 )
1834 )
1830 coreconfigitem(
1835 coreconfigitem(
1831 b'server',
1836 b'server',
1832 b'pullbundle',
1837 b'pullbundle',
1833 default=False,
1838 default=False,
1834 )
1839 )
1835 coreconfigitem(
1840 coreconfigitem(
1836 b'server',
1841 b'server',
1837 b'preferuncompressed',
1842 b'preferuncompressed',
1838 default=False,
1843 default=False,
1839 )
1844 )
1840 coreconfigitem(
1845 coreconfigitem(
1841 b'server',
1846 b'server',
1842 b'streamunbundle',
1847 b'streamunbundle',
1843 default=False,
1848 default=False,
1844 )
1849 )
1845 coreconfigitem(
1850 coreconfigitem(
1846 b'server',
1851 b'server',
1847 b'uncompressed',
1852 b'uncompressed',
1848 default=True,
1853 default=True,
1849 )
1854 )
1850 coreconfigitem(
1855 coreconfigitem(
1851 b'server',
1856 b'server',
1852 b'uncompressedallowsecret',
1857 b'uncompressedallowsecret',
1853 default=False,
1858 default=False,
1854 )
1859 )
1855 coreconfigitem(
1860 coreconfigitem(
1856 b'server',
1861 b'server',
1857 b'view',
1862 b'view',
1858 default=b'served',
1863 default=b'served',
1859 )
1864 )
1860 coreconfigitem(
1865 coreconfigitem(
1861 b'server',
1866 b'server',
1862 b'validate',
1867 b'validate',
1863 default=False,
1868 default=False,
1864 )
1869 )
1865 coreconfigitem(
1870 coreconfigitem(
1866 b'server',
1871 b'server',
1867 b'zliblevel',
1872 b'zliblevel',
1868 default=-1,
1873 default=-1,
1869 )
1874 )
1870 coreconfigitem(
1875 coreconfigitem(
1871 b'server',
1876 b'server',
1872 b'zstdlevel',
1877 b'zstdlevel',
1873 default=3,
1878 default=3,
1874 )
1879 )
1875 coreconfigitem(
1880 coreconfigitem(
1876 b'share',
1881 b'share',
1877 b'pool',
1882 b'pool',
1878 default=None,
1883 default=None,
1879 )
1884 )
1880 coreconfigitem(
1885 coreconfigitem(
1881 b'share',
1886 b'share',
1882 b'poolnaming',
1887 b'poolnaming',
1883 default=b'identity',
1888 default=b'identity',
1884 )
1889 )
1885 coreconfigitem(
1890 coreconfigitem(
1886 b'shelve',
1891 b'shelve',
1887 b'maxbackups',
1892 b'maxbackups',
1888 default=10,
1893 default=10,
1889 )
1894 )
1890 coreconfigitem(
1895 coreconfigitem(
1891 b'smtp',
1896 b'smtp',
1892 b'host',
1897 b'host',
1893 default=None,
1898 default=None,
1894 )
1899 )
1895 coreconfigitem(
1900 coreconfigitem(
1896 b'smtp',
1901 b'smtp',
1897 b'local_hostname',
1902 b'local_hostname',
1898 default=None,
1903 default=None,
1899 )
1904 )
1900 coreconfigitem(
1905 coreconfigitem(
1901 b'smtp',
1906 b'smtp',
1902 b'password',
1907 b'password',
1903 default=None,
1908 default=None,
1904 )
1909 )
1905 coreconfigitem(
1910 coreconfigitem(
1906 b'smtp',
1911 b'smtp',
1907 b'port',
1912 b'port',
1908 default=dynamicdefault,
1913 default=dynamicdefault,
1909 )
1914 )
1910 coreconfigitem(
1915 coreconfigitem(
1911 b'smtp',
1916 b'smtp',
1912 b'tls',
1917 b'tls',
1913 default=b'none',
1918 default=b'none',
1914 )
1919 )
1915 coreconfigitem(
1920 coreconfigitem(
1916 b'smtp',
1921 b'smtp',
1917 b'username',
1922 b'username',
1918 default=None,
1923 default=None,
1919 )
1924 )
1920 coreconfigitem(
1925 coreconfigitem(
1921 b'sparse',
1926 b'sparse',
1922 b'missingwarning',
1927 b'missingwarning',
1923 default=True,
1928 default=True,
1924 experimental=True,
1929 experimental=True,
1925 )
1930 )
1926 coreconfigitem(
1931 coreconfigitem(
1927 b'subrepos',
1932 b'subrepos',
1928 b'allowed',
1933 b'allowed',
1929 default=dynamicdefault, # to make backporting simpler
1934 default=dynamicdefault, # to make backporting simpler
1930 )
1935 )
1931 coreconfigitem(
1936 coreconfigitem(
1932 b'subrepos',
1937 b'subrepos',
1933 b'hg:allowed',
1938 b'hg:allowed',
1934 default=dynamicdefault,
1939 default=dynamicdefault,
1935 )
1940 )
1936 coreconfigitem(
1941 coreconfigitem(
1937 b'subrepos',
1942 b'subrepos',
1938 b'git:allowed',
1943 b'git:allowed',
1939 default=dynamicdefault,
1944 default=dynamicdefault,
1940 )
1945 )
1941 coreconfigitem(
1946 coreconfigitem(
1942 b'subrepos',
1947 b'subrepos',
1943 b'svn:allowed',
1948 b'svn:allowed',
1944 default=dynamicdefault,
1949 default=dynamicdefault,
1945 )
1950 )
1946 coreconfigitem(
1951 coreconfigitem(
1947 b'templates',
1952 b'templates',
1948 b'.*',
1953 b'.*',
1949 default=None,
1954 default=None,
1950 generic=True,
1955 generic=True,
1951 )
1956 )
1952 coreconfigitem(
1957 coreconfigitem(
1953 b'templateconfig',
1958 b'templateconfig',
1954 b'.*',
1959 b'.*',
1955 default=dynamicdefault,
1960 default=dynamicdefault,
1956 generic=True,
1961 generic=True,
1957 )
1962 )
1958 coreconfigitem(
1963 coreconfigitem(
1959 b'trusted',
1964 b'trusted',
1960 b'groups',
1965 b'groups',
1961 default=list,
1966 default=list,
1962 )
1967 )
1963 coreconfigitem(
1968 coreconfigitem(
1964 b'trusted',
1969 b'trusted',
1965 b'users',
1970 b'users',
1966 default=list,
1971 default=list,
1967 )
1972 )
1968 coreconfigitem(
1973 coreconfigitem(
1969 b'ui',
1974 b'ui',
1970 b'_usedassubrepo',
1975 b'_usedassubrepo',
1971 default=False,
1976 default=False,
1972 )
1977 )
1973 coreconfigitem(
1978 coreconfigitem(
1974 b'ui',
1979 b'ui',
1975 b'allowemptycommit',
1980 b'allowemptycommit',
1976 default=False,
1981 default=False,
1977 )
1982 )
1978 coreconfigitem(
1983 coreconfigitem(
1979 b'ui',
1984 b'ui',
1980 b'archivemeta',
1985 b'archivemeta',
1981 default=True,
1986 default=True,
1982 )
1987 )
1983 coreconfigitem(
1988 coreconfigitem(
1984 b'ui',
1989 b'ui',
1985 b'askusername',
1990 b'askusername',
1986 default=False,
1991 default=False,
1987 )
1992 )
1988 coreconfigitem(
1993 coreconfigitem(
1989 b'ui',
1994 b'ui',
1990 b'available-memory',
1995 b'available-memory',
1991 default=None,
1996 default=None,
1992 )
1997 )
1993
1998
1994 coreconfigitem(
1999 coreconfigitem(
1995 b'ui',
2000 b'ui',
1996 b'clonebundlefallback',
2001 b'clonebundlefallback',
1997 default=False,
2002 default=False,
1998 )
2003 )
1999 coreconfigitem(
2004 coreconfigitem(
2000 b'ui',
2005 b'ui',
2001 b'clonebundleprefers',
2006 b'clonebundleprefers',
2002 default=list,
2007 default=list,
2003 )
2008 )
2004 coreconfigitem(
2009 coreconfigitem(
2005 b'ui',
2010 b'ui',
2006 b'clonebundles',
2011 b'clonebundles',
2007 default=True,
2012 default=True,
2008 )
2013 )
2009 coreconfigitem(
2014 coreconfigitem(
2010 b'ui',
2015 b'ui',
2011 b'color',
2016 b'color',
2012 default=b'auto',
2017 default=b'auto',
2013 )
2018 )
2014 coreconfigitem(
2019 coreconfigitem(
2015 b'ui',
2020 b'ui',
2016 b'commitsubrepos',
2021 b'commitsubrepos',
2017 default=False,
2022 default=False,
2018 )
2023 )
2019 coreconfigitem(
2024 coreconfigitem(
2020 b'ui',
2025 b'ui',
2021 b'debug',
2026 b'debug',
2022 default=False,
2027 default=False,
2023 )
2028 )
2024 coreconfigitem(
2029 coreconfigitem(
2025 b'ui',
2030 b'ui',
2026 b'debugger',
2031 b'debugger',
2027 default=None,
2032 default=None,
2028 )
2033 )
2029 coreconfigitem(
2034 coreconfigitem(
2030 b'ui',
2035 b'ui',
2031 b'editor',
2036 b'editor',
2032 default=dynamicdefault,
2037 default=dynamicdefault,
2033 )
2038 )
2034 coreconfigitem(
2039 coreconfigitem(
2035 b'ui',
2040 b'ui',
2036 b'detailed-exit-code',
2041 b'detailed-exit-code',
2037 default=False,
2042 default=False,
2038 experimental=True,
2043 experimental=True,
2039 )
2044 )
2040 coreconfigitem(
2045 coreconfigitem(
2041 b'ui',
2046 b'ui',
2042 b'fallbackencoding',
2047 b'fallbackencoding',
2043 default=None,
2048 default=None,
2044 )
2049 )
2045 coreconfigitem(
2050 coreconfigitem(
2046 b'ui',
2051 b'ui',
2047 b'forcecwd',
2052 b'forcecwd',
2048 default=None,
2053 default=None,
2049 )
2054 )
2050 coreconfigitem(
2055 coreconfigitem(
2051 b'ui',
2056 b'ui',
2052 b'forcemerge',
2057 b'forcemerge',
2053 default=None,
2058 default=None,
2054 )
2059 )
2055 coreconfigitem(
2060 coreconfigitem(
2056 b'ui',
2061 b'ui',
2057 b'formatdebug',
2062 b'formatdebug',
2058 default=False,
2063 default=False,
2059 )
2064 )
2060 coreconfigitem(
2065 coreconfigitem(
2061 b'ui',
2066 b'ui',
2062 b'formatjson',
2067 b'formatjson',
2063 default=False,
2068 default=False,
2064 )
2069 )
2065 coreconfigitem(
2070 coreconfigitem(
2066 b'ui',
2071 b'ui',
2067 b'formatted',
2072 b'formatted',
2068 default=None,
2073 default=None,
2069 )
2074 )
2070 coreconfigitem(
2075 coreconfigitem(
2071 b'ui',
2076 b'ui',
2072 b'interactive',
2077 b'interactive',
2073 default=None,
2078 default=None,
2074 )
2079 )
2075 coreconfigitem(
2080 coreconfigitem(
2076 b'ui',
2081 b'ui',
2077 b'interface',
2082 b'interface',
2078 default=None,
2083 default=None,
2079 )
2084 )
2080 coreconfigitem(
2085 coreconfigitem(
2081 b'ui',
2086 b'ui',
2082 b'interface.chunkselector',
2087 b'interface.chunkselector',
2083 default=None,
2088 default=None,
2084 )
2089 )
2085 coreconfigitem(
2090 coreconfigitem(
2086 b'ui',
2091 b'ui',
2087 b'large-file-limit',
2092 b'large-file-limit',
2088 default=10000000,
2093 default=10000000,
2089 )
2094 )
2090 coreconfigitem(
2095 coreconfigitem(
2091 b'ui',
2096 b'ui',
2092 b'logblockedtimes',
2097 b'logblockedtimes',
2093 default=False,
2098 default=False,
2094 )
2099 )
2095 coreconfigitem(
2100 coreconfigitem(
2096 b'ui',
2101 b'ui',
2097 b'merge',
2102 b'merge',
2098 default=None,
2103 default=None,
2099 )
2104 )
2100 coreconfigitem(
2105 coreconfigitem(
2101 b'ui',
2106 b'ui',
2102 b'mergemarkers',
2107 b'mergemarkers',
2103 default=b'basic',
2108 default=b'basic',
2104 )
2109 )
2105 coreconfigitem(
2110 coreconfigitem(
2106 b'ui',
2111 b'ui',
2107 b'message-output',
2112 b'message-output',
2108 default=b'stdio',
2113 default=b'stdio',
2109 )
2114 )
2110 coreconfigitem(
2115 coreconfigitem(
2111 b'ui',
2116 b'ui',
2112 b'nontty',
2117 b'nontty',
2113 default=False,
2118 default=False,
2114 )
2119 )
2115 coreconfigitem(
2120 coreconfigitem(
2116 b'ui',
2121 b'ui',
2117 b'origbackuppath',
2122 b'origbackuppath',
2118 default=None,
2123 default=None,
2119 )
2124 )
2120 coreconfigitem(
2125 coreconfigitem(
2121 b'ui',
2126 b'ui',
2122 b'paginate',
2127 b'paginate',
2123 default=True,
2128 default=True,
2124 )
2129 )
2125 coreconfigitem(
2130 coreconfigitem(
2126 b'ui',
2131 b'ui',
2127 b'patch',
2132 b'patch',
2128 default=None,
2133 default=None,
2129 )
2134 )
2130 coreconfigitem(
2135 coreconfigitem(
2131 b'ui',
2136 b'ui',
2132 b'portablefilenames',
2137 b'portablefilenames',
2133 default=b'warn',
2138 default=b'warn',
2134 )
2139 )
2135 coreconfigitem(
2140 coreconfigitem(
2136 b'ui',
2141 b'ui',
2137 b'promptecho',
2142 b'promptecho',
2138 default=False,
2143 default=False,
2139 )
2144 )
2140 coreconfigitem(
2145 coreconfigitem(
2141 b'ui',
2146 b'ui',
2142 b'quiet',
2147 b'quiet',
2143 default=False,
2148 default=False,
2144 )
2149 )
2145 coreconfigitem(
2150 coreconfigitem(
2146 b'ui',
2151 b'ui',
2147 b'quietbookmarkmove',
2152 b'quietbookmarkmove',
2148 default=False,
2153 default=False,
2149 )
2154 )
2150 coreconfigitem(
2155 coreconfigitem(
2151 b'ui',
2156 b'ui',
2152 b'relative-paths',
2157 b'relative-paths',
2153 default=b'legacy',
2158 default=b'legacy',
2154 )
2159 )
2155 coreconfigitem(
2160 coreconfigitem(
2156 b'ui',
2161 b'ui',
2157 b'remotecmd',
2162 b'remotecmd',
2158 default=b'hg',
2163 default=b'hg',
2159 )
2164 )
2160 coreconfigitem(
2165 coreconfigitem(
2161 b'ui',
2166 b'ui',
2162 b'report_untrusted',
2167 b'report_untrusted',
2163 default=True,
2168 default=True,
2164 )
2169 )
2165 coreconfigitem(
2170 coreconfigitem(
2166 b'ui',
2171 b'ui',
2167 b'rollback',
2172 b'rollback',
2168 default=True,
2173 default=True,
2169 )
2174 )
2170 coreconfigitem(
2175 coreconfigitem(
2171 b'ui',
2176 b'ui',
2172 b'signal-safe-lock',
2177 b'signal-safe-lock',
2173 default=True,
2178 default=True,
2174 )
2179 )
2175 coreconfigitem(
2180 coreconfigitem(
2176 b'ui',
2181 b'ui',
2177 b'slash',
2182 b'slash',
2178 default=False,
2183 default=False,
2179 )
2184 )
2180 coreconfigitem(
2185 coreconfigitem(
2181 b'ui',
2186 b'ui',
2182 b'ssh',
2187 b'ssh',
2183 default=b'ssh',
2188 default=b'ssh',
2184 )
2189 )
2185 coreconfigitem(
2190 coreconfigitem(
2186 b'ui',
2191 b'ui',
2187 b'ssherrorhint',
2192 b'ssherrorhint',
2188 default=None,
2193 default=None,
2189 )
2194 )
2190 coreconfigitem(
2195 coreconfigitem(
2191 b'ui',
2196 b'ui',
2192 b'statuscopies',
2197 b'statuscopies',
2193 default=False,
2198 default=False,
2194 )
2199 )
2195 coreconfigitem(
2200 coreconfigitem(
2196 b'ui',
2201 b'ui',
2197 b'strict',
2202 b'strict',
2198 default=False,
2203 default=False,
2199 )
2204 )
2200 coreconfigitem(
2205 coreconfigitem(
2201 b'ui',
2206 b'ui',
2202 b'style',
2207 b'style',
2203 default=b'',
2208 default=b'',
2204 )
2209 )
2205 coreconfigitem(
2210 coreconfigitem(
2206 b'ui',
2211 b'ui',
2207 b'supportcontact',
2212 b'supportcontact',
2208 default=None,
2213 default=None,
2209 )
2214 )
2210 coreconfigitem(
2215 coreconfigitem(
2211 b'ui',
2216 b'ui',
2212 b'textwidth',
2217 b'textwidth',
2213 default=78,
2218 default=78,
2214 )
2219 )
2215 coreconfigitem(
2220 coreconfigitem(
2216 b'ui',
2221 b'ui',
2217 b'timeout',
2222 b'timeout',
2218 default=b'600',
2223 default=b'600',
2219 )
2224 )
2220 coreconfigitem(
2225 coreconfigitem(
2221 b'ui',
2226 b'ui',
2222 b'timeout.warn',
2227 b'timeout.warn',
2223 default=0,
2228 default=0,
2224 )
2229 )
2225 coreconfigitem(
2230 coreconfigitem(
2226 b'ui',
2231 b'ui',
2227 b'timestamp-output',
2232 b'timestamp-output',
2228 default=False,
2233 default=False,
2229 )
2234 )
2230 coreconfigitem(
2235 coreconfigitem(
2231 b'ui',
2236 b'ui',
2232 b'traceback',
2237 b'traceback',
2233 default=False,
2238 default=False,
2234 )
2239 )
2235 coreconfigitem(
2240 coreconfigitem(
2236 b'ui',
2241 b'ui',
2237 b'tweakdefaults',
2242 b'tweakdefaults',
2238 default=False,
2243 default=False,
2239 )
2244 )
2240 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2245 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2241 coreconfigitem(
2246 coreconfigitem(
2242 b'ui',
2247 b'ui',
2243 b'verbose',
2248 b'verbose',
2244 default=False,
2249 default=False,
2245 )
2250 )
2246 coreconfigitem(
2251 coreconfigitem(
2247 b'verify',
2252 b'verify',
2248 b'skipflags',
2253 b'skipflags',
2249 default=None,
2254 default=None,
2250 )
2255 )
2251 coreconfigitem(
2256 coreconfigitem(
2252 b'web',
2257 b'web',
2253 b'allowbz2',
2258 b'allowbz2',
2254 default=False,
2259 default=False,
2255 )
2260 )
2256 coreconfigitem(
2261 coreconfigitem(
2257 b'web',
2262 b'web',
2258 b'allowgz',
2263 b'allowgz',
2259 default=False,
2264 default=False,
2260 )
2265 )
2261 coreconfigitem(
2266 coreconfigitem(
2262 b'web',
2267 b'web',
2263 b'allow-pull',
2268 b'allow-pull',
2264 alias=[(b'web', b'allowpull')],
2269 alias=[(b'web', b'allowpull')],
2265 default=True,
2270 default=True,
2266 )
2271 )
2267 coreconfigitem(
2272 coreconfigitem(
2268 b'web',
2273 b'web',
2269 b'allow-push',
2274 b'allow-push',
2270 alias=[(b'web', b'allow_push')],
2275 alias=[(b'web', b'allow_push')],
2271 default=list,
2276 default=list,
2272 )
2277 )
2273 coreconfigitem(
2278 coreconfigitem(
2274 b'web',
2279 b'web',
2275 b'allowzip',
2280 b'allowzip',
2276 default=False,
2281 default=False,
2277 )
2282 )
2278 coreconfigitem(
2283 coreconfigitem(
2279 b'web',
2284 b'web',
2280 b'archivesubrepos',
2285 b'archivesubrepos',
2281 default=False,
2286 default=False,
2282 )
2287 )
2283 coreconfigitem(
2288 coreconfigitem(
2284 b'web',
2289 b'web',
2285 b'cache',
2290 b'cache',
2286 default=True,
2291 default=True,
2287 )
2292 )
2288 coreconfigitem(
2293 coreconfigitem(
2289 b'web',
2294 b'web',
2290 b'comparisoncontext',
2295 b'comparisoncontext',
2291 default=5,
2296 default=5,
2292 )
2297 )
2293 coreconfigitem(
2298 coreconfigitem(
2294 b'web',
2299 b'web',
2295 b'contact',
2300 b'contact',
2296 default=None,
2301 default=None,
2297 )
2302 )
2298 coreconfigitem(
2303 coreconfigitem(
2299 b'web',
2304 b'web',
2300 b'deny_push',
2305 b'deny_push',
2301 default=list,
2306 default=list,
2302 )
2307 )
2303 coreconfigitem(
2308 coreconfigitem(
2304 b'web',
2309 b'web',
2305 b'guessmime',
2310 b'guessmime',
2306 default=False,
2311 default=False,
2307 )
2312 )
2308 coreconfigitem(
2313 coreconfigitem(
2309 b'web',
2314 b'web',
2310 b'hidden',
2315 b'hidden',
2311 default=False,
2316 default=False,
2312 )
2317 )
2313 coreconfigitem(
2318 coreconfigitem(
2314 b'web',
2319 b'web',
2315 b'labels',
2320 b'labels',
2316 default=list,
2321 default=list,
2317 )
2322 )
2318 coreconfigitem(
2323 coreconfigitem(
2319 b'web',
2324 b'web',
2320 b'logoimg',
2325 b'logoimg',
2321 default=b'hglogo.png',
2326 default=b'hglogo.png',
2322 )
2327 )
2323 coreconfigitem(
2328 coreconfigitem(
2324 b'web',
2329 b'web',
2325 b'logourl',
2330 b'logourl',
2326 default=b'https://mercurial-scm.org/',
2331 default=b'https://mercurial-scm.org/',
2327 )
2332 )
2328 coreconfigitem(
2333 coreconfigitem(
2329 b'web',
2334 b'web',
2330 b'accesslog',
2335 b'accesslog',
2331 default=b'-',
2336 default=b'-',
2332 )
2337 )
2333 coreconfigitem(
2338 coreconfigitem(
2334 b'web',
2339 b'web',
2335 b'address',
2340 b'address',
2336 default=b'',
2341 default=b'',
2337 )
2342 )
2338 coreconfigitem(
2343 coreconfigitem(
2339 b'web',
2344 b'web',
2340 b'allow-archive',
2345 b'allow-archive',
2341 alias=[(b'web', b'allow_archive')],
2346 alias=[(b'web', b'allow_archive')],
2342 default=list,
2347 default=list,
2343 )
2348 )
2344 coreconfigitem(
2349 coreconfigitem(
2345 b'web',
2350 b'web',
2346 b'allow_read',
2351 b'allow_read',
2347 default=list,
2352 default=list,
2348 )
2353 )
2349 coreconfigitem(
2354 coreconfigitem(
2350 b'web',
2355 b'web',
2351 b'baseurl',
2356 b'baseurl',
2352 default=None,
2357 default=None,
2353 )
2358 )
2354 coreconfigitem(
2359 coreconfigitem(
2355 b'web',
2360 b'web',
2356 b'cacerts',
2361 b'cacerts',
2357 default=None,
2362 default=None,
2358 )
2363 )
2359 coreconfigitem(
2364 coreconfigitem(
2360 b'web',
2365 b'web',
2361 b'certificate',
2366 b'certificate',
2362 default=None,
2367 default=None,
2363 )
2368 )
2364 coreconfigitem(
2369 coreconfigitem(
2365 b'web',
2370 b'web',
2366 b'collapse',
2371 b'collapse',
2367 default=False,
2372 default=False,
2368 )
2373 )
2369 coreconfigitem(
2374 coreconfigitem(
2370 b'web',
2375 b'web',
2371 b'csp',
2376 b'csp',
2372 default=None,
2377 default=None,
2373 )
2378 )
2374 coreconfigitem(
2379 coreconfigitem(
2375 b'web',
2380 b'web',
2376 b'deny_read',
2381 b'deny_read',
2377 default=list,
2382 default=list,
2378 )
2383 )
2379 coreconfigitem(
2384 coreconfigitem(
2380 b'web',
2385 b'web',
2381 b'descend',
2386 b'descend',
2382 default=True,
2387 default=True,
2383 )
2388 )
2384 coreconfigitem(
2389 coreconfigitem(
2385 b'web',
2390 b'web',
2386 b'description',
2391 b'description',
2387 default=b"",
2392 default=b"",
2388 )
2393 )
2389 coreconfigitem(
2394 coreconfigitem(
2390 b'web',
2395 b'web',
2391 b'encoding',
2396 b'encoding',
2392 default=lambda: encoding.encoding,
2397 default=lambda: encoding.encoding,
2393 )
2398 )
2394 coreconfigitem(
2399 coreconfigitem(
2395 b'web',
2400 b'web',
2396 b'errorlog',
2401 b'errorlog',
2397 default=b'-',
2402 default=b'-',
2398 )
2403 )
2399 coreconfigitem(
2404 coreconfigitem(
2400 b'web',
2405 b'web',
2401 b'ipv6',
2406 b'ipv6',
2402 default=False,
2407 default=False,
2403 )
2408 )
2404 coreconfigitem(
2409 coreconfigitem(
2405 b'web',
2410 b'web',
2406 b'maxchanges',
2411 b'maxchanges',
2407 default=10,
2412 default=10,
2408 )
2413 )
2409 coreconfigitem(
2414 coreconfigitem(
2410 b'web',
2415 b'web',
2411 b'maxfiles',
2416 b'maxfiles',
2412 default=10,
2417 default=10,
2413 )
2418 )
2414 coreconfigitem(
2419 coreconfigitem(
2415 b'web',
2420 b'web',
2416 b'maxshortchanges',
2421 b'maxshortchanges',
2417 default=60,
2422 default=60,
2418 )
2423 )
2419 coreconfigitem(
2424 coreconfigitem(
2420 b'web',
2425 b'web',
2421 b'motd',
2426 b'motd',
2422 default=b'',
2427 default=b'',
2423 )
2428 )
2424 coreconfigitem(
2429 coreconfigitem(
2425 b'web',
2430 b'web',
2426 b'name',
2431 b'name',
2427 default=dynamicdefault,
2432 default=dynamicdefault,
2428 )
2433 )
2429 coreconfigitem(
2434 coreconfigitem(
2430 b'web',
2435 b'web',
2431 b'port',
2436 b'port',
2432 default=8000,
2437 default=8000,
2433 )
2438 )
2434 coreconfigitem(
2439 coreconfigitem(
2435 b'web',
2440 b'web',
2436 b'prefix',
2441 b'prefix',
2437 default=b'',
2442 default=b'',
2438 )
2443 )
2439 coreconfigitem(
2444 coreconfigitem(
2440 b'web',
2445 b'web',
2441 b'push_ssl',
2446 b'push_ssl',
2442 default=True,
2447 default=True,
2443 )
2448 )
2444 coreconfigitem(
2449 coreconfigitem(
2445 b'web',
2450 b'web',
2446 b'refreshinterval',
2451 b'refreshinterval',
2447 default=20,
2452 default=20,
2448 )
2453 )
2449 coreconfigitem(
2454 coreconfigitem(
2450 b'web',
2455 b'web',
2451 b'server-header',
2456 b'server-header',
2452 default=None,
2457 default=None,
2453 )
2458 )
2454 coreconfigitem(
2459 coreconfigitem(
2455 b'web',
2460 b'web',
2456 b'static',
2461 b'static',
2457 default=None,
2462 default=None,
2458 )
2463 )
2459 coreconfigitem(
2464 coreconfigitem(
2460 b'web',
2465 b'web',
2461 b'staticurl',
2466 b'staticurl',
2462 default=None,
2467 default=None,
2463 )
2468 )
2464 coreconfigitem(
2469 coreconfigitem(
2465 b'web',
2470 b'web',
2466 b'stripes',
2471 b'stripes',
2467 default=1,
2472 default=1,
2468 )
2473 )
2469 coreconfigitem(
2474 coreconfigitem(
2470 b'web',
2475 b'web',
2471 b'style',
2476 b'style',
2472 default=b'paper',
2477 default=b'paper',
2473 )
2478 )
2474 coreconfigitem(
2479 coreconfigitem(
2475 b'web',
2480 b'web',
2476 b'templates',
2481 b'templates',
2477 default=None,
2482 default=None,
2478 )
2483 )
2479 coreconfigitem(
2484 coreconfigitem(
2480 b'web',
2485 b'web',
2481 b'view',
2486 b'view',
2482 default=b'served',
2487 default=b'served',
2483 experimental=True,
2488 experimental=True,
2484 )
2489 )
2485 coreconfigitem(
2490 coreconfigitem(
2486 b'worker',
2491 b'worker',
2487 b'backgroundclose',
2492 b'backgroundclose',
2488 default=dynamicdefault,
2493 default=dynamicdefault,
2489 )
2494 )
2490 # Windows defaults to a limit of 512 open files. A buffer of 128
2495 # Windows defaults to a limit of 512 open files. A buffer of 128
2491 # should give us enough headway.
2496 # should give us enough headway.
2492 coreconfigitem(
2497 coreconfigitem(
2493 b'worker',
2498 b'worker',
2494 b'backgroundclosemaxqueue',
2499 b'backgroundclosemaxqueue',
2495 default=384,
2500 default=384,
2496 )
2501 )
2497 coreconfigitem(
2502 coreconfigitem(
2498 b'worker',
2503 b'worker',
2499 b'backgroundcloseminfilecount',
2504 b'backgroundcloseminfilecount',
2500 default=2048,
2505 default=2048,
2501 )
2506 )
2502 coreconfigitem(
2507 coreconfigitem(
2503 b'worker',
2508 b'worker',
2504 b'backgroundclosethreadcount',
2509 b'backgroundclosethreadcount',
2505 default=4,
2510 default=4,
2506 )
2511 )
2507 coreconfigitem(
2512 coreconfigitem(
2508 b'worker',
2513 b'worker',
2509 b'enabled',
2514 b'enabled',
2510 default=True,
2515 default=True,
2511 )
2516 )
2512 coreconfigitem(
2517 coreconfigitem(
2513 b'worker',
2518 b'worker',
2514 b'numcpus',
2519 b'numcpus',
2515 default=None,
2520 default=None,
2516 )
2521 )
2517
2522
2518 # Rebase related configuration moved to core because other extension are doing
2523 # Rebase related configuration moved to core because other extension are doing
2519 # strange things. For example, shelve import the extensions to reuse some bit
2524 # strange things. For example, shelve import the extensions to reuse some bit
2520 # without formally loading it.
2525 # without formally loading it.
2521 coreconfigitem(
2526 coreconfigitem(
2522 b'commands',
2527 b'commands',
2523 b'rebase.requiredest',
2528 b'rebase.requiredest',
2524 default=False,
2529 default=False,
2525 )
2530 )
2526 coreconfigitem(
2531 coreconfigitem(
2527 b'experimental',
2532 b'experimental',
2528 b'rebaseskipobsolete',
2533 b'rebaseskipobsolete',
2529 default=True,
2534 default=True,
2530 )
2535 )
2531 coreconfigitem(
2536 coreconfigitem(
2532 b'rebase',
2537 b'rebase',
2533 b'singletransaction',
2538 b'singletransaction',
2534 default=False,
2539 default=False,
2535 )
2540 )
2536 coreconfigitem(
2541 coreconfigitem(
2537 b'rebase',
2542 b'rebase',
2538 b'experimental.inmemory',
2543 b'experimental.inmemory',
2539 default=False,
2544 default=False,
2540 )
2545 )
@@ -1,3602 +1,3617 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import functools
11 import functools
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 delattr,
27 delattr,
28 getattr,
28 getattr,
29 )
29 )
30 from . import (
30 from . import (
31 bookmarks,
31 bookmarks,
32 branchmap,
32 branchmap,
33 bundle2,
33 bundle2,
34 bundlecaches,
34 bundlecaches,
35 changegroup,
35 changegroup,
36 color,
36 color,
37 commit,
37 commit,
38 context,
38 context,
39 dirstate,
39 dirstate,
40 dirstateguard,
40 dirstateguard,
41 discovery,
41 discovery,
42 encoding,
42 encoding,
43 error,
43 error,
44 exchange,
44 exchange,
45 extensions,
45 extensions,
46 filelog,
46 filelog,
47 hook,
47 hook,
48 lock as lockmod,
48 lock as lockmod,
49 match as matchmod,
49 match as matchmod,
50 mergestate as mergestatemod,
50 mergestate as mergestatemod,
51 mergeutil,
51 mergeutil,
52 namespaces,
52 namespaces,
53 narrowspec,
53 narrowspec,
54 obsolete,
54 obsolete,
55 pathutil,
55 pathutil,
56 phases,
56 phases,
57 pushkey,
57 pushkey,
58 pycompat,
58 pycompat,
59 rcutil,
59 rcutil,
60 repoview,
60 repoview,
61 requirements as requirementsmod,
61 requirements as requirementsmod,
62 revset,
62 revset,
63 revsetlang,
63 revsetlang,
64 scmutil,
64 scmutil,
65 sparse,
65 sparse,
66 store as storemod,
66 store as storemod,
67 subrepoutil,
67 subrepoutil,
68 tags as tagsmod,
68 tags as tagsmod,
69 transaction,
69 transaction,
70 txnutil,
70 txnutil,
71 util,
71 util,
72 vfs as vfsmod,
72 vfs as vfsmod,
73 )
73 )
74
74
75 from .interfaces import (
75 from .interfaces import (
76 repository,
76 repository,
77 util as interfaceutil,
77 util as interfaceutil,
78 )
78 )
79
79
80 from .utils import (
80 from .utils import (
81 hashutil,
81 hashutil,
82 procutil,
82 procutil,
83 stringutil,
83 stringutil,
84 )
84 )
85
85
86 from .revlogutils import constants as revlogconst
86 from .revlogutils import constants as revlogconst
87
87
88 release = lockmod.release
88 release = lockmod.release
89 urlerr = util.urlerr
89 urlerr = util.urlerr
90 urlreq = util.urlreq
90 urlreq = util.urlreq
91
91
92 # set of (path, vfs-location) tuples. vfs-location is:
92 # set of (path, vfs-location) tuples. vfs-location is:
93 # - 'plain for vfs relative paths
93 # - 'plain for vfs relative paths
94 # - '' for svfs relative paths
94 # - '' for svfs relative paths
95 _cachedfiles = set()
95 _cachedfiles = set()
96
96
97
97
98 class _basefilecache(scmutil.filecache):
98 class _basefilecache(scmutil.filecache):
99 """All filecache usage on repo are done for logic that should be unfiltered"""
99 """All filecache usage on repo are done for logic that should be unfiltered"""
100
100
101 def __get__(self, repo, type=None):
101 def __get__(self, repo, type=None):
102 if repo is None:
102 if repo is None:
103 return self
103 return self
104 # proxy to unfiltered __dict__ since filtered repo has no entry
104 # proxy to unfiltered __dict__ since filtered repo has no entry
105 unfi = repo.unfiltered()
105 unfi = repo.unfiltered()
106 try:
106 try:
107 return unfi.__dict__[self.sname]
107 return unfi.__dict__[self.sname]
108 except KeyError:
108 except KeyError:
109 pass
109 pass
110 return super(_basefilecache, self).__get__(unfi, type)
110 return super(_basefilecache, self).__get__(unfi, type)
111
111
112 def set(self, repo, value):
112 def set(self, repo, value):
113 return super(_basefilecache, self).set(repo.unfiltered(), value)
113 return super(_basefilecache, self).set(repo.unfiltered(), value)
114
114
115
115
116 class repofilecache(_basefilecache):
116 class repofilecache(_basefilecache):
117 """filecache for files in .hg but outside of .hg/store"""
117 """filecache for files in .hg but outside of .hg/store"""
118
118
119 def __init__(self, *paths):
119 def __init__(self, *paths):
120 super(repofilecache, self).__init__(*paths)
120 super(repofilecache, self).__init__(*paths)
121 for path in paths:
121 for path in paths:
122 _cachedfiles.add((path, b'plain'))
122 _cachedfiles.add((path, b'plain'))
123
123
124 def join(self, obj, fname):
124 def join(self, obj, fname):
125 return obj.vfs.join(fname)
125 return obj.vfs.join(fname)
126
126
127
127
128 class storecache(_basefilecache):
128 class storecache(_basefilecache):
129 """filecache for files in the store"""
129 """filecache for files in the store"""
130
130
131 def __init__(self, *paths):
131 def __init__(self, *paths):
132 super(storecache, self).__init__(*paths)
132 super(storecache, self).__init__(*paths)
133 for path in paths:
133 for path in paths:
134 _cachedfiles.add((path, b''))
134 _cachedfiles.add((path, b''))
135
135
136 def join(self, obj, fname):
136 def join(self, obj, fname):
137 return obj.sjoin(fname)
137 return obj.sjoin(fname)
138
138
139
139
140 class mixedrepostorecache(_basefilecache):
140 class mixedrepostorecache(_basefilecache):
141 """filecache for a mix files in .hg/store and outside"""
141 """filecache for a mix files in .hg/store and outside"""
142
142
143 def __init__(self, *pathsandlocations):
143 def __init__(self, *pathsandlocations):
144 # scmutil.filecache only uses the path for passing back into our
144 # scmutil.filecache only uses the path for passing back into our
145 # join(), so we can safely pass a list of paths and locations
145 # join(), so we can safely pass a list of paths and locations
146 super(mixedrepostorecache, self).__init__(*pathsandlocations)
146 super(mixedrepostorecache, self).__init__(*pathsandlocations)
147 _cachedfiles.update(pathsandlocations)
147 _cachedfiles.update(pathsandlocations)
148
148
149 def join(self, obj, fnameandlocation):
149 def join(self, obj, fnameandlocation):
150 fname, location = fnameandlocation
150 fname, location = fnameandlocation
151 if location == b'plain':
151 if location == b'plain':
152 return obj.vfs.join(fname)
152 return obj.vfs.join(fname)
153 else:
153 else:
154 if location != b'':
154 if location != b'':
155 raise error.ProgrammingError(
155 raise error.ProgrammingError(
156 b'unexpected location: %s' % location
156 b'unexpected location: %s' % location
157 )
157 )
158 return obj.sjoin(fname)
158 return obj.sjoin(fname)
159
159
160
160
161 def isfilecached(repo, name):
161 def isfilecached(repo, name):
162 """check if a repo has already cached "name" filecache-ed property
162 """check if a repo has already cached "name" filecache-ed property
163
163
164 This returns (cachedobj-or-None, iscached) tuple.
164 This returns (cachedobj-or-None, iscached) tuple.
165 """
165 """
166 cacheentry = repo.unfiltered()._filecache.get(name, None)
166 cacheentry = repo.unfiltered()._filecache.get(name, None)
167 if not cacheentry:
167 if not cacheentry:
168 return None, False
168 return None, False
169 return cacheentry.obj, True
169 return cacheentry.obj, True
170
170
171
171
172 class unfilteredpropertycache(util.propertycache):
172 class unfilteredpropertycache(util.propertycache):
173 """propertycache that apply to unfiltered repo only"""
173 """propertycache that apply to unfiltered repo only"""
174
174
175 def __get__(self, repo, type=None):
175 def __get__(self, repo, type=None):
176 unfi = repo.unfiltered()
176 unfi = repo.unfiltered()
177 if unfi is repo:
177 if unfi is repo:
178 return super(unfilteredpropertycache, self).__get__(unfi)
178 return super(unfilteredpropertycache, self).__get__(unfi)
179 return getattr(unfi, self.name)
179 return getattr(unfi, self.name)
180
180
181
181
182 class filteredpropertycache(util.propertycache):
182 class filteredpropertycache(util.propertycache):
183 """propertycache that must take filtering in account"""
183 """propertycache that must take filtering in account"""
184
184
185 def cachevalue(self, obj, value):
185 def cachevalue(self, obj, value):
186 object.__setattr__(obj, self.name, value)
186 object.__setattr__(obj, self.name, value)
187
187
188
188
189 def hasunfilteredcache(repo, name):
189 def hasunfilteredcache(repo, name):
190 """check if a repo has an unfilteredpropertycache value for <name>"""
190 """check if a repo has an unfilteredpropertycache value for <name>"""
191 return name in vars(repo.unfiltered())
191 return name in vars(repo.unfiltered())
192
192
193
193
194 def unfilteredmethod(orig):
194 def unfilteredmethod(orig):
195 """decorate method that always need to be run on unfiltered version"""
195 """decorate method that always need to be run on unfiltered version"""
196
196
197 @functools.wraps(orig)
197 @functools.wraps(orig)
198 def wrapper(repo, *args, **kwargs):
198 def wrapper(repo, *args, **kwargs):
199 return orig(repo.unfiltered(), *args, **kwargs)
199 return orig(repo.unfiltered(), *args, **kwargs)
200
200
201 return wrapper
201 return wrapper
202
202
203
203
204 moderncaps = {
204 moderncaps = {
205 b'lookup',
205 b'lookup',
206 b'branchmap',
206 b'branchmap',
207 b'pushkey',
207 b'pushkey',
208 b'known',
208 b'known',
209 b'getbundle',
209 b'getbundle',
210 b'unbundle',
210 b'unbundle',
211 }
211 }
212 legacycaps = moderncaps.union({b'changegroupsubset'})
212 legacycaps = moderncaps.union({b'changegroupsubset'})
213
213
214
214
215 @interfaceutil.implementer(repository.ipeercommandexecutor)
215 @interfaceutil.implementer(repository.ipeercommandexecutor)
216 class localcommandexecutor(object):
216 class localcommandexecutor(object):
217 def __init__(self, peer):
217 def __init__(self, peer):
218 self._peer = peer
218 self._peer = peer
219 self._sent = False
219 self._sent = False
220 self._closed = False
220 self._closed = False
221
221
222 def __enter__(self):
222 def __enter__(self):
223 return self
223 return self
224
224
225 def __exit__(self, exctype, excvalue, exctb):
225 def __exit__(self, exctype, excvalue, exctb):
226 self.close()
226 self.close()
227
227
228 def callcommand(self, command, args):
228 def callcommand(self, command, args):
229 if self._sent:
229 if self._sent:
230 raise error.ProgrammingError(
230 raise error.ProgrammingError(
231 b'callcommand() cannot be used after sendcommands()'
231 b'callcommand() cannot be used after sendcommands()'
232 )
232 )
233
233
234 if self._closed:
234 if self._closed:
235 raise error.ProgrammingError(
235 raise error.ProgrammingError(
236 b'callcommand() cannot be used after close()'
236 b'callcommand() cannot be used after close()'
237 )
237 )
238
238
239 # We don't need to support anything fancy. Just call the named
239 # We don't need to support anything fancy. Just call the named
240 # method on the peer and return a resolved future.
240 # method on the peer and return a resolved future.
241 fn = getattr(self._peer, pycompat.sysstr(command))
241 fn = getattr(self._peer, pycompat.sysstr(command))
242
242
243 f = pycompat.futures.Future()
243 f = pycompat.futures.Future()
244
244
245 try:
245 try:
246 result = fn(**pycompat.strkwargs(args))
246 result = fn(**pycompat.strkwargs(args))
247 except Exception:
247 except Exception:
248 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
248 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
249 else:
249 else:
250 f.set_result(result)
250 f.set_result(result)
251
251
252 return f
252 return f
253
253
254 def sendcommands(self):
254 def sendcommands(self):
255 self._sent = True
255 self._sent = True
256
256
257 def close(self):
257 def close(self):
258 self._closed = True
258 self._closed = True
259
259
260
260
261 @interfaceutil.implementer(repository.ipeercommands)
261 @interfaceutil.implementer(repository.ipeercommands)
262 class localpeer(repository.peer):
262 class localpeer(repository.peer):
263 '''peer for a local repo; reflects only the most recent API'''
263 '''peer for a local repo; reflects only the most recent API'''
264
264
265 def __init__(self, repo, caps=None):
265 def __init__(self, repo, caps=None):
266 super(localpeer, self).__init__()
266 super(localpeer, self).__init__()
267
267
268 if caps is None:
268 if caps is None:
269 caps = moderncaps.copy()
269 caps = moderncaps.copy()
270 self._repo = repo.filtered(b'served')
270 self._repo = repo.filtered(b'served')
271 self.ui = repo.ui
271 self.ui = repo.ui
272 self._caps = repo._restrictcapabilities(caps)
272 self._caps = repo._restrictcapabilities(caps)
273
273
274 # Begin of _basepeer interface.
274 # Begin of _basepeer interface.
275
275
276 def url(self):
276 def url(self):
277 return self._repo.url()
277 return self._repo.url()
278
278
279 def local(self):
279 def local(self):
280 return self._repo
280 return self._repo
281
281
282 def peer(self):
282 def peer(self):
283 return self
283 return self
284
284
285 def canpush(self):
285 def canpush(self):
286 return True
286 return True
287
287
288 def close(self):
288 def close(self):
289 self._repo.close()
289 self._repo.close()
290
290
291 # End of _basepeer interface.
291 # End of _basepeer interface.
292
292
293 # Begin of _basewirecommands interface.
293 # Begin of _basewirecommands interface.
294
294
295 def branchmap(self):
295 def branchmap(self):
296 return self._repo.branchmap()
296 return self._repo.branchmap()
297
297
298 def capabilities(self):
298 def capabilities(self):
299 return self._caps
299 return self._caps
300
300
301 def clonebundles(self):
301 def clonebundles(self):
302 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
302 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
303
303
304 def debugwireargs(self, one, two, three=None, four=None, five=None):
304 def debugwireargs(self, one, two, three=None, four=None, five=None):
305 """Used to test argument passing over the wire"""
305 """Used to test argument passing over the wire"""
306 return b"%s %s %s %s %s" % (
306 return b"%s %s %s %s %s" % (
307 one,
307 one,
308 two,
308 two,
309 pycompat.bytestr(three),
309 pycompat.bytestr(three),
310 pycompat.bytestr(four),
310 pycompat.bytestr(four),
311 pycompat.bytestr(five),
311 pycompat.bytestr(five),
312 )
312 )
313
313
314 def getbundle(
314 def getbundle(
315 self, source, heads=None, common=None, bundlecaps=None, **kwargs
315 self, source, heads=None, common=None, bundlecaps=None, **kwargs
316 ):
316 ):
317 chunks = exchange.getbundlechunks(
317 chunks = exchange.getbundlechunks(
318 self._repo,
318 self._repo,
319 source,
319 source,
320 heads=heads,
320 heads=heads,
321 common=common,
321 common=common,
322 bundlecaps=bundlecaps,
322 bundlecaps=bundlecaps,
323 **kwargs
323 **kwargs
324 )[1]
324 )[1]
325 cb = util.chunkbuffer(chunks)
325 cb = util.chunkbuffer(chunks)
326
326
327 if exchange.bundle2requested(bundlecaps):
327 if exchange.bundle2requested(bundlecaps):
328 # When requesting a bundle2, getbundle returns a stream to make the
328 # When requesting a bundle2, getbundle returns a stream to make the
329 # wire level function happier. We need to build a proper object
329 # wire level function happier. We need to build a proper object
330 # from it in local peer.
330 # from it in local peer.
331 return bundle2.getunbundler(self.ui, cb)
331 return bundle2.getunbundler(self.ui, cb)
332 else:
332 else:
333 return changegroup.getunbundler(b'01', cb, None)
333 return changegroup.getunbundler(b'01', cb, None)
334
334
335 def heads(self):
335 def heads(self):
336 return self._repo.heads()
336 return self._repo.heads()
337
337
338 def known(self, nodes):
338 def known(self, nodes):
339 return self._repo.known(nodes)
339 return self._repo.known(nodes)
340
340
341 def listkeys(self, namespace):
341 def listkeys(self, namespace):
342 return self._repo.listkeys(namespace)
342 return self._repo.listkeys(namespace)
343
343
344 def lookup(self, key):
344 def lookup(self, key):
345 return self._repo.lookup(key)
345 return self._repo.lookup(key)
346
346
347 def pushkey(self, namespace, key, old, new):
347 def pushkey(self, namespace, key, old, new):
348 return self._repo.pushkey(namespace, key, old, new)
348 return self._repo.pushkey(namespace, key, old, new)
349
349
350 def stream_out(self):
350 def stream_out(self):
351 raise error.Abort(_(b'cannot perform stream clone against local peer'))
351 raise error.Abort(_(b'cannot perform stream clone against local peer'))
352
352
353 def unbundle(self, bundle, heads, url):
353 def unbundle(self, bundle, heads, url):
354 """apply a bundle on a repo
354 """apply a bundle on a repo
355
355
356 This function handles the repo locking itself."""
356 This function handles the repo locking itself."""
357 try:
357 try:
358 try:
358 try:
359 bundle = exchange.readbundle(self.ui, bundle, None)
359 bundle = exchange.readbundle(self.ui, bundle, None)
360 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
360 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
361 if util.safehasattr(ret, b'getchunks'):
361 if util.safehasattr(ret, b'getchunks'):
362 # This is a bundle20 object, turn it into an unbundler.
362 # This is a bundle20 object, turn it into an unbundler.
363 # This little dance should be dropped eventually when the
363 # This little dance should be dropped eventually when the
364 # API is finally improved.
364 # API is finally improved.
365 stream = util.chunkbuffer(ret.getchunks())
365 stream = util.chunkbuffer(ret.getchunks())
366 ret = bundle2.getunbundler(self.ui, stream)
366 ret = bundle2.getunbundler(self.ui, stream)
367 return ret
367 return ret
368 except Exception as exc:
368 except Exception as exc:
369 # If the exception contains output salvaged from a bundle2
369 # If the exception contains output salvaged from a bundle2
370 # reply, we need to make sure it is printed before continuing
370 # reply, we need to make sure it is printed before continuing
371 # to fail. So we build a bundle2 with such output and consume
371 # to fail. So we build a bundle2 with such output and consume
372 # it directly.
372 # it directly.
373 #
373 #
374 # This is not very elegant but allows a "simple" solution for
374 # This is not very elegant but allows a "simple" solution for
375 # issue4594
375 # issue4594
376 output = getattr(exc, '_bundle2salvagedoutput', ())
376 output = getattr(exc, '_bundle2salvagedoutput', ())
377 if output:
377 if output:
378 bundler = bundle2.bundle20(self._repo.ui)
378 bundler = bundle2.bundle20(self._repo.ui)
379 for out in output:
379 for out in output:
380 bundler.addpart(out)
380 bundler.addpart(out)
381 stream = util.chunkbuffer(bundler.getchunks())
381 stream = util.chunkbuffer(bundler.getchunks())
382 b = bundle2.getunbundler(self.ui, stream)
382 b = bundle2.getunbundler(self.ui, stream)
383 bundle2.processbundle(self._repo, b)
383 bundle2.processbundle(self._repo, b)
384 raise
384 raise
385 except error.PushRaced as exc:
385 except error.PushRaced as exc:
386 raise error.ResponseError(
386 raise error.ResponseError(
387 _(b'push failed:'), stringutil.forcebytestr(exc)
387 _(b'push failed:'), stringutil.forcebytestr(exc)
388 )
388 )
389
389
390 # End of _basewirecommands interface.
390 # End of _basewirecommands interface.
391
391
392 # Begin of peer interface.
392 # Begin of peer interface.
393
393
394 def commandexecutor(self):
394 def commandexecutor(self):
395 return localcommandexecutor(self)
395 return localcommandexecutor(self)
396
396
397 # End of peer interface.
397 # End of peer interface.
398
398
399
399
400 @interfaceutil.implementer(repository.ipeerlegacycommands)
400 @interfaceutil.implementer(repository.ipeerlegacycommands)
401 class locallegacypeer(localpeer):
401 class locallegacypeer(localpeer):
402 """peer extension which implements legacy methods too; used for tests with
402 """peer extension which implements legacy methods too; used for tests with
403 restricted capabilities"""
403 restricted capabilities"""
404
404
405 def __init__(self, repo):
405 def __init__(self, repo):
406 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
406 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
407
407
408 # Begin of baselegacywirecommands interface.
408 # Begin of baselegacywirecommands interface.
409
409
410 def between(self, pairs):
410 def between(self, pairs):
411 return self._repo.between(pairs)
411 return self._repo.between(pairs)
412
412
413 def branches(self, nodes):
413 def branches(self, nodes):
414 return self._repo.branches(nodes)
414 return self._repo.branches(nodes)
415
415
416 def changegroup(self, nodes, source):
416 def changegroup(self, nodes, source):
417 outgoing = discovery.outgoing(
417 outgoing = discovery.outgoing(
418 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
418 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
419 )
419 )
420 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
420 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
421
421
422 def changegroupsubset(self, bases, heads, source):
422 def changegroupsubset(self, bases, heads, source):
423 outgoing = discovery.outgoing(
423 outgoing = discovery.outgoing(
424 self._repo, missingroots=bases, ancestorsof=heads
424 self._repo, missingroots=bases, ancestorsof=heads
425 )
425 )
426 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
426 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
427
427
428 # End of baselegacywirecommands interface.
428 # End of baselegacywirecommands interface.
429
429
430
430
431 # Functions receiving (ui, features) that extensions can register to impact
431 # Functions receiving (ui, features) that extensions can register to impact
432 # the ability to load repositories with custom requirements. Only
432 # the ability to load repositories with custom requirements. Only
433 # functions defined in loaded extensions are called.
433 # functions defined in loaded extensions are called.
434 #
434 #
435 # The function receives a set of requirement strings that the repository
435 # The function receives a set of requirement strings that the repository
436 # is capable of opening. Functions will typically add elements to the
436 # is capable of opening. Functions will typically add elements to the
437 # set to reflect that the extension knows how to handle that requirements.
437 # set to reflect that the extension knows how to handle that requirements.
438 featuresetupfuncs = set()
438 featuresetupfuncs = set()
439
439
440
440
441 def _getsharedvfs(hgvfs, requirements):
441 def _getsharedvfs(hgvfs, requirements):
442 """returns the vfs object pointing to root of shared source
442 """returns the vfs object pointing to root of shared source
443 repo for a shared repository
443 repo for a shared repository
444
444
445 hgvfs is vfs pointing at .hg/ of current repo (shared one)
445 hgvfs is vfs pointing at .hg/ of current repo (shared one)
446 requirements is a set of requirements of current repo (shared one)
446 requirements is a set of requirements of current repo (shared one)
447 """
447 """
448 # The ``shared`` or ``relshared`` requirements indicate the
448 # The ``shared`` or ``relshared`` requirements indicate the
449 # store lives in the path contained in the ``.hg/sharedpath`` file.
449 # store lives in the path contained in the ``.hg/sharedpath`` file.
450 # This is an absolute path for ``shared`` and relative to
450 # This is an absolute path for ``shared`` and relative to
451 # ``.hg/`` for ``relshared``.
451 # ``.hg/`` for ``relshared``.
452 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
452 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
453 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
453 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
454 sharedpath = hgvfs.join(sharedpath)
454 sharedpath = hgvfs.join(sharedpath)
455
455
456 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
456 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
457
457
458 if not sharedvfs.exists():
458 if not sharedvfs.exists():
459 raise error.RepoError(
459 raise error.RepoError(
460 _(b'.hg/sharedpath points to nonexistent directory %s')
460 _(b'.hg/sharedpath points to nonexistent directory %s')
461 % sharedvfs.base
461 % sharedvfs.base
462 )
462 )
463 return sharedvfs
463 return sharedvfs
464
464
465
465
466 def _readrequires(vfs, allowmissing):
466 def _readrequires(vfs, allowmissing):
467 """reads the require file present at root of this vfs
467 """reads the require file present at root of this vfs
468 and return a set of requirements
468 and return a set of requirements
469
469
470 If allowmissing is True, we suppress ENOENT if raised"""
470 If allowmissing is True, we suppress ENOENT if raised"""
471 # requires file contains a newline-delimited list of
471 # requires file contains a newline-delimited list of
472 # features/capabilities the opener (us) must have in order to use
472 # features/capabilities the opener (us) must have in order to use
473 # the repository. This file was introduced in Mercurial 0.9.2,
473 # the repository. This file was introduced in Mercurial 0.9.2,
474 # which means very old repositories may not have one. We assume
474 # which means very old repositories may not have one. We assume
475 # a missing file translates to no requirements.
475 # a missing file translates to no requirements.
476 try:
476 try:
477 requirements = set(vfs.read(b'requires').splitlines())
477 requirements = set(vfs.read(b'requires').splitlines())
478 except IOError as e:
478 except IOError as e:
479 if not (allowmissing and e.errno == errno.ENOENT):
479 if not (allowmissing and e.errno == errno.ENOENT):
480 raise
480 raise
481 requirements = set()
481 requirements = set()
482 return requirements
482 return requirements
483
483
484
484
485 def makelocalrepository(baseui, path, intents=None):
485 def makelocalrepository(baseui, path, intents=None):
486 """Create a local repository object.
486 """Create a local repository object.
487
487
488 Given arguments needed to construct a local repository, this function
488 Given arguments needed to construct a local repository, this function
489 performs various early repository loading functionality (such as
489 performs various early repository loading functionality (such as
490 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
490 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
491 the repository can be opened, derives a type suitable for representing
491 the repository can be opened, derives a type suitable for representing
492 that repository, and returns an instance of it.
492 that repository, and returns an instance of it.
493
493
494 The returned object conforms to the ``repository.completelocalrepository``
494 The returned object conforms to the ``repository.completelocalrepository``
495 interface.
495 interface.
496
496
497 The repository type is derived by calling a series of factory functions
497 The repository type is derived by calling a series of factory functions
498 for each aspect/interface of the final repository. These are defined by
498 for each aspect/interface of the final repository. These are defined by
499 ``REPO_INTERFACES``.
499 ``REPO_INTERFACES``.
500
500
501 Each factory function is called to produce a type implementing a specific
501 Each factory function is called to produce a type implementing a specific
502 interface. The cumulative list of returned types will be combined into a
502 interface. The cumulative list of returned types will be combined into a
503 new type and that type will be instantiated to represent the local
503 new type and that type will be instantiated to represent the local
504 repository.
504 repository.
505
505
506 The factory functions each receive various state that may be consulted
506 The factory functions each receive various state that may be consulted
507 as part of deriving a type.
507 as part of deriving a type.
508
508
509 Extensions should wrap these factory functions to customize repository type
509 Extensions should wrap these factory functions to customize repository type
510 creation. Note that an extension's wrapped function may be called even if
510 creation. Note that an extension's wrapped function may be called even if
511 that extension is not loaded for the repo being constructed. Extensions
511 that extension is not loaded for the repo being constructed. Extensions
512 should check if their ``__name__`` appears in the
512 should check if their ``__name__`` appears in the
513 ``extensionmodulenames`` set passed to the factory function and no-op if
513 ``extensionmodulenames`` set passed to the factory function and no-op if
514 not.
514 not.
515 """
515 """
516 ui = baseui.copy()
516 ui = baseui.copy()
517 # Prevent copying repo configuration.
517 # Prevent copying repo configuration.
518 ui.copy = baseui.copy
518 ui.copy = baseui.copy
519
519
520 # Working directory VFS rooted at repository root.
520 # Working directory VFS rooted at repository root.
521 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
521 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
522
522
523 # Main VFS for .hg/ directory.
523 # Main VFS for .hg/ directory.
524 hgpath = wdirvfs.join(b'.hg')
524 hgpath = wdirvfs.join(b'.hg')
525 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
525 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
526 # Whether this repository is shared one or not
526 # Whether this repository is shared one or not
527 shared = False
527 shared = False
528 # If this repository is shared, vfs pointing to shared repo
528 # If this repository is shared, vfs pointing to shared repo
529 sharedvfs = None
529 sharedvfs = None
530
530
531 # The .hg/ path should exist and should be a directory. All other
531 # The .hg/ path should exist and should be a directory. All other
532 # cases are errors.
532 # cases are errors.
533 if not hgvfs.isdir():
533 if not hgvfs.isdir():
534 try:
534 try:
535 hgvfs.stat()
535 hgvfs.stat()
536 except OSError as e:
536 except OSError as e:
537 if e.errno != errno.ENOENT:
537 if e.errno != errno.ENOENT:
538 raise
538 raise
539 except ValueError as e:
539 except ValueError as e:
540 # Can be raised on Python 3.8 when path is invalid.
540 # Can be raised on Python 3.8 when path is invalid.
541 raise error.Abort(
541 raise error.Abort(
542 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
542 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
543 )
543 )
544
544
545 raise error.RepoError(_(b'repository %s not found') % path)
545 raise error.RepoError(_(b'repository %s not found') % path)
546
546
547 requirements = _readrequires(hgvfs, True)
547 requirements = _readrequires(hgvfs, True)
548 shared = (
548 shared = (
549 requirementsmod.SHARED_REQUIREMENT in requirements
549 requirementsmod.SHARED_REQUIREMENT in requirements
550 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
550 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
551 )
551 )
552 storevfs = None
552 storevfs = None
553 if shared:
553 if shared:
554 # This is a shared repo
554 # This is a shared repo
555 sharedvfs = _getsharedvfs(hgvfs, requirements)
555 sharedvfs = _getsharedvfs(hgvfs, requirements)
556 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
556 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
557 else:
557 else:
558 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
558 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
559
559
560 # if .hg/requires contains the sharesafe requirement, it means
560 # if .hg/requires contains the sharesafe requirement, it means
561 # there exists a `.hg/store/requires` too and we should read it
561 # there exists a `.hg/store/requires` too and we should read it
562 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
562 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
563 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
563 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
564 # is not present, refer checkrequirementscompat() for that
564 # is not present, refer checkrequirementscompat() for that
565 #
565 #
566 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
566 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
567 # repository was shared the old way. We check the share source .hg/requires
567 # repository was shared the old way. We check the share source .hg/requires
568 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
568 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
569 # to be reshared
569 # to be reshared
570 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
570 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
571
571
572 if (
572 if (
573 shared
573 shared
574 and requirementsmod.SHARESAFE_REQUIREMENT
574 and requirementsmod.SHARESAFE_REQUIREMENT
575 not in _readrequires(sharedvfs, True)
575 not in _readrequires(sharedvfs, True)
576 ):
576 ):
577 raise error.Abort(
577 if ui.configbool(
578 _(b"share source does not support exp-sharesafe requirement")
578 b'experimental', b'sharesafe-auto-downgrade-shares'
579 )
579 ):
580
580 # prevent cyclic import localrepo -> upgrade -> localrepo
581 requirements |= _readrequires(storevfs, False)
581 from . import upgrade
582
583 upgrade.downgrade_share_to_non_safe(
584 ui,
585 hgvfs,
586 sharedvfs,
587 requirements,
588 )
589 else:
590 raise error.Abort(
591 _(
592 b"share source does not support exp-sharesafe requirement"
593 )
594 )
595 else:
596 requirements |= _readrequires(storevfs, False)
582 elif shared:
597 elif shared:
583 sourcerequires = _readrequires(sharedvfs, False)
598 sourcerequires = _readrequires(sharedvfs, False)
584 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
599 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
585 if ui.configbool(b'experimental', b'sharesafe-auto-upgrade-shares'):
600 if ui.configbool(b'experimental', b'sharesafe-auto-upgrade-shares'):
586 # prevent cyclic import localrepo -> upgrade -> localrepo
601 # prevent cyclic import localrepo -> upgrade -> localrepo
587 from . import upgrade
602 from . import upgrade
588
603
589 upgrade.upgrade_share_to_safe(
604 upgrade.upgrade_share_to_safe(
590 ui,
605 ui,
591 hgvfs,
606 hgvfs,
592 storevfs,
607 storevfs,
593 requirements,
608 requirements,
594 )
609 )
595 else:
610 else:
596 ui.warn(
611 ui.warn(
597 _(
612 _(
598 b'warning: source repository supports share-safe functionality.'
613 b'warning: source repository supports share-safe functionality.'
599 b' Reshare to upgrade.\n'
614 b' Reshare to upgrade.\n'
600 )
615 )
601 )
616 )
602
617
603 # The .hg/hgrc file may load extensions or contain config options
618 # The .hg/hgrc file may load extensions or contain config options
604 # that influence repository construction. Attempt to load it and
619 # that influence repository construction. Attempt to load it and
605 # process any new extensions that it may have pulled in.
620 # process any new extensions that it may have pulled in.
606 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
621 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
607 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
622 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
608 extensions.loadall(ui)
623 extensions.loadall(ui)
609 extensions.populateui(ui)
624 extensions.populateui(ui)
610
625
611 # Set of module names of extensions loaded for this repository.
626 # Set of module names of extensions loaded for this repository.
612 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
627 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
613
628
614 supportedrequirements = gathersupportedrequirements(ui)
629 supportedrequirements = gathersupportedrequirements(ui)
615
630
616 # We first validate the requirements are known.
631 # We first validate the requirements are known.
617 ensurerequirementsrecognized(requirements, supportedrequirements)
632 ensurerequirementsrecognized(requirements, supportedrequirements)
618
633
619 # Then we validate that the known set is reasonable to use together.
634 # Then we validate that the known set is reasonable to use together.
620 ensurerequirementscompatible(ui, requirements)
635 ensurerequirementscompatible(ui, requirements)
621
636
622 # TODO there are unhandled edge cases related to opening repositories with
637 # TODO there are unhandled edge cases related to opening repositories with
623 # shared storage. If storage is shared, we should also test for requirements
638 # shared storage. If storage is shared, we should also test for requirements
624 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
639 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
625 # that repo, as that repo may load extensions needed to open it. This is a
640 # that repo, as that repo may load extensions needed to open it. This is a
626 # bit complicated because we don't want the other hgrc to overwrite settings
641 # bit complicated because we don't want the other hgrc to overwrite settings
627 # in this hgrc.
642 # in this hgrc.
628 #
643 #
629 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
644 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
630 # file when sharing repos. But if a requirement is added after the share is
645 # file when sharing repos. But if a requirement is added after the share is
631 # performed, thereby introducing a new requirement for the opener, we may
646 # performed, thereby introducing a new requirement for the opener, we may
632 # will not see that and could encounter a run-time error interacting with
647 # will not see that and could encounter a run-time error interacting with
633 # that shared store since it has an unknown-to-us requirement.
648 # that shared store since it has an unknown-to-us requirement.
634
649
635 # At this point, we know we should be capable of opening the repository.
650 # At this point, we know we should be capable of opening the repository.
636 # Now get on with doing that.
651 # Now get on with doing that.
637
652
638 features = set()
653 features = set()
639
654
640 # The "store" part of the repository holds versioned data. How it is
655 # The "store" part of the repository holds versioned data. How it is
641 # accessed is determined by various requirements. If `shared` or
656 # accessed is determined by various requirements. If `shared` or
642 # `relshared` requirements are present, this indicates current repository
657 # `relshared` requirements are present, this indicates current repository
643 # is a share and store exists in path mentioned in `.hg/sharedpath`
658 # is a share and store exists in path mentioned in `.hg/sharedpath`
644 if shared:
659 if shared:
645 storebasepath = sharedvfs.base
660 storebasepath = sharedvfs.base
646 cachepath = sharedvfs.join(b'cache')
661 cachepath = sharedvfs.join(b'cache')
647 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
662 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
648 else:
663 else:
649 storebasepath = hgvfs.base
664 storebasepath = hgvfs.base
650 cachepath = hgvfs.join(b'cache')
665 cachepath = hgvfs.join(b'cache')
651 wcachepath = hgvfs.join(b'wcache')
666 wcachepath = hgvfs.join(b'wcache')
652
667
653 # The store has changed over time and the exact layout is dictated by
668 # The store has changed over time and the exact layout is dictated by
654 # requirements. The store interface abstracts differences across all
669 # requirements. The store interface abstracts differences across all
655 # of them.
670 # of them.
656 store = makestore(
671 store = makestore(
657 requirements,
672 requirements,
658 storebasepath,
673 storebasepath,
659 lambda base: vfsmod.vfs(base, cacheaudited=True),
674 lambda base: vfsmod.vfs(base, cacheaudited=True),
660 )
675 )
661 hgvfs.createmode = store.createmode
676 hgvfs.createmode = store.createmode
662
677
663 storevfs = store.vfs
678 storevfs = store.vfs
664 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
679 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
665
680
666 # The cache vfs is used to manage cache files.
681 # The cache vfs is used to manage cache files.
667 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
682 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
668 cachevfs.createmode = store.createmode
683 cachevfs.createmode = store.createmode
669 # The cache vfs is used to manage cache files related to the working copy
684 # The cache vfs is used to manage cache files related to the working copy
670 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
685 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
671 wcachevfs.createmode = store.createmode
686 wcachevfs.createmode = store.createmode
672
687
673 # Now resolve the type for the repository object. We do this by repeatedly
688 # Now resolve the type for the repository object. We do this by repeatedly
674 # calling a factory function to produces types for specific aspects of the
689 # calling a factory function to produces types for specific aspects of the
675 # repo's operation. The aggregate returned types are used as base classes
690 # repo's operation. The aggregate returned types are used as base classes
676 # for a dynamically-derived type, which will represent our new repository.
691 # for a dynamically-derived type, which will represent our new repository.
677
692
678 bases = []
693 bases = []
679 extrastate = {}
694 extrastate = {}
680
695
681 for iface, fn in REPO_INTERFACES:
696 for iface, fn in REPO_INTERFACES:
682 # We pass all potentially useful state to give extensions tons of
697 # We pass all potentially useful state to give extensions tons of
683 # flexibility.
698 # flexibility.
684 typ = fn()(
699 typ = fn()(
685 ui=ui,
700 ui=ui,
686 intents=intents,
701 intents=intents,
687 requirements=requirements,
702 requirements=requirements,
688 features=features,
703 features=features,
689 wdirvfs=wdirvfs,
704 wdirvfs=wdirvfs,
690 hgvfs=hgvfs,
705 hgvfs=hgvfs,
691 store=store,
706 store=store,
692 storevfs=storevfs,
707 storevfs=storevfs,
693 storeoptions=storevfs.options,
708 storeoptions=storevfs.options,
694 cachevfs=cachevfs,
709 cachevfs=cachevfs,
695 wcachevfs=wcachevfs,
710 wcachevfs=wcachevfs,
696 extensionmodulenames=extensionmodulenames,
711 extensionmodulenames=extensionmodulenames,
697 extrastate=extrastate,
712 extrastate=extrastate,
698 baseclasses=bases,
713 baseclasses=bases,
699 )
714 )
700
715
701 if not isinstance(typ, type):
716 if not isinstance(typ, type):
702 raise error.ProgrammingError(
717 raise error.ProgrammingError(
703 b'unable to construct type for %s' % iface
718 b'unable to construct type for %s' % iface
704 )
719 )
705
720
706 bases.append(typ)
721 bases.append(typ)
707
722
708 # type() allows you to use characters in type names that wouldn't be
723 # type() allows you to use characters in type names that wouldn't be
709 # recognized as Python symbols in source code. We abuse that to add
724 # recognized as Python symbols in source code. We abuse that to add
710 # rich information about our constructed repo.
725 # rich information about our constructed repo.
711 name = pycompat.sysstr(
726 name = pycompat.sysstr(
712 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
727 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
713 )
728 )
714
729
715 cls = type(name, tuple(bases), {})
730 cls = type(name, tuple(bases), {})
716
731
717 return cls(
732 return cls(
718 baseui=baseui,
733 baseui=baseui,
719 ui=ui,
734 ui=ui,
720 origroot=path,
735 origroot=path,
721 wdirvfs=wdirvfs,
736 wdirvfs=wdirvfs,
722 hgvfs=hgvfs,
737 hgvfs=hgvfs,
723 requirements=requirements,
738 requirements=requirements,
724 supportedrequirements=supportedrequirements,
739 supportedrequirements=supportedrequirements,
725 sharedpath=storebasepath,
740 sharedpath=storebasepath,
726 store=store,
741 store=store,
727 cachevfs=cachevfs,
742 cachevfs=cachevfs,
728 wcachevfs=wcachevfs,
743 wcachevfs=wcachevfs,
729 features=features,
744 features=features,
730 intents=intents,
745 intents=intents,
731 )
746 )
732
747
733
748
734 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
749 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
735 """Load hgrc files/content into a ui instance.
750 """Load hgrc files/content into a ui instance.
736
751
737 This is called during repository opening to load any additional
752 This is called during repository opening to load any additional
738 config files or settings relevant to the current repository.
753 config files or settings relevant to the current repository.
739
754
740 Returns a bool indicating whether any additional configs were loaded.
755 Returns a bool indicating whether any additional configs were loaded.
741
756
742 Extensions should monkeypatch this function to modify how per-repo
757 Extensions should monkeypatch this function to modify how per-repo
743 configs are loaded. For example, an extension may wish to pull in
758 configs are loaded. For example, an extension may wish to pull in
744 configs from alternate files or sources.
759 configs from alternate files or sources.
745
760
746 sharedvfs is vfs object pointing to source repo if the current one is a
761 sharedvfs is vfs object pointing to source repo if the current one is a
747 shared one
762 shared one
748 """
763 """
749 if not rcutil.use_repo_hgrc():
764 if not rcutil.use_repo_hgrc():
750 return False
765 return False
751
766
752 ret = False
767 ret = False
753 # first load config from shared source if we has to
768 # first load config from shared source if we has to
754 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
769 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
755 try:
770 try:
756 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
771 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
757 ret = True
772 ret = True
758 except IOError:
773 except IOError:
759 pass
774 pass
760
775
761 try:
776 try:
762 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
777 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
763 ret = True
778 ret = True
764 except IOError:
779 except IOError:
765 pass
780 pass
766
781
767 try:
782 try:
768 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
783 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
769 ret = True
784 ret = True
770 except IOError:
785 except IOError:
771 pass
786 pass
772
787
773 return ret
788 return ret
774
789
775
790
776 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
791 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
777 """Perform additional actions after .hg/hgrc is loaded.
792 """Perform additional actions after .hg/hgrc is loaded.
778
793
779 This function is called during repository loading immediately after
794 This function is called during repository loading immediately after
780 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
795 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
781
796
782 The function can be used to validate configs, automatically add
797 The function can be used to validate configs, automatically add
783 options (including extensions) based on requirements, etc.
798 options (including extensions) based on requirements, etc.
784 """
799 """
785
800
786 # Map of requirements to list of extensions to load automatically when
801 # Map of requirements to list of extensions to load automatically when
787 # requirement is present.
802 # requirement is present.
788 autoextensions = {
803 autoextensions = {
789 b'git': [b'git'],
804 b'git': [b'git'],
790 b'largefiles': [b'largefiles'],
805 b'largefiles': [b'largefiles'],
791 b'lfs': [b'lfs'],
806 b'lfs': [b'lfs'],
792 }
807 }
793
808
794 for requirement, names in sorted(autoextensions.items()):
809 for requirement, names in sorted(autoextensions.items()):
795 if requirement not in requirements:
810 if requirement not in requirements:
796 continue
811 continue
797
812
798 for name in names:
813 for name in names:
799 if not ui.hasconfig(b'extensions', name):
814 if not ui.hasconfig(b'extensions', name):
800 ui.setconfig(b'extensions', name, b'', source=b'autoload')
815 ui.setconfig(b'extensions', name, b'', source=b'autoload')
801
816
802
817
803 def gathersupportedrequirements(ui):
818 def gathersupportedrequirements(ui):
804 """Determine the complete set of recognized requirements."""
819 """Determine the complete set of recognized requirements."""
805 # Start with all requirements supported by this file.
820 # Start with all requirements supported by this file.
806 supported = set(localrepository._basesupported)
821 supported = set(localrepository._basesupported)
807
822
808 # Execute ``featuresetupfuncs`` entries if they belong to an extension
823 # Execute ``featuresetupfuncs`` entries if they belong to an extension
809 # relevant to this ui instance.
824 # relevant to this ui instance.
810 modules = {m.__name__ for n, m in extensions.extensions(ui)}
825 modules = {m.__name__ for n, m in extensions.extensions(ui)}
811
826
812 for fn in featuresetupfuncs:
827 for fn in featuresetupfuncs:
813 if fn.__module__ in modules:
828 if fn.__module__ in modules:
814 fn(ui, supported)
829 fn(ui, supported)
815
830
816 # Add derived requirements from registered compression engines.
831 # Add derived requirements from registered compression engines.
817 for name in util.compengines:
832 for name in util.compengines:
818 engine = util.compengines[name]
833 engine = util.compengines[name]
819 if engine.available() and engine.revlogheader():
834 if engine.available() and engine.revlogheader():
820 supported.add(b'exp-compression-%s' % name)
835 supported.add(b'exp-compression-%s' % name)
821 if engine.name() == b'zstd':
836 if engine.name() == b'zstd':
822 supported.add(b'revlog-compression-zstd')
837 supported.add(b'revlog-compression-zstd')
823
838
824 return supported
839 return supported
825
840
826
841
827 def ensurerequirementsrecognized(requirements, supported):
842 def ensurerequirementsrecognized(requirements, supported):
828 """Validate that a set of local requirements is recognized.
843 """Validate that a set of local requirements is recognized.
829
844
830 Receives a set of requirements. Raises an ``error.RepoError`` if there
845 Receives a set of requirements. Raises an ``error.RepoError`` if there
831 exists any requirement in that set that currently loaded code doesn't
846 exists any requirement in that set that currently loaded code doesn't
832 recognize.
847 recognize.
833
848
834 Returns a set of supported requirements.
849 Returns a set of supported requirements.
835 """
850 """
836 missing = set()
851 missing = set()
837
852
838 for requirement in requirements:
853 for requirement in requirements:
839 if requirement in supported:
854 if requirement in supported:
840 continue
855 continue
841
856
842 if not requirement or not requirement[0:1].isalnum():
857 if not requirement or not requirement[0:1].isalnum():
843 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
858 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
844
859
845 missing.add(requirement)
860 missing.add(requirement)
846
861
847 if missing:
862 if missing:
848 raise error.RequirementError(
863 raise error.RequirementError(
849 _(b'repository requires features unknown to this Mercurial: %s')
864 _(b'repository requires features unknown to this Mercurial: %s')
850 % b' '.join(sorted(missing)),
865 % b' '.join(sorted(missing)),
851 hint=_(
866 hint=_(
852 b'see https://mercurial-scm.org/wiki/MissingRequirement '
867 b'see https://mercurial-scm.org/wiki/MissingRequirement '
853 b'for more information'
868 b'for more information'
854 ),
869 ),
855 )
870 )
856
871
857
872
858 def ensurerequirementscompatible(ui, requirements):
873 def ensurerequirementscompatible(ui, requirements):
859 """Validates that a set of recognized requirements is mutually compatible.
874 """Validates that a set of recognized requirements is mutually compatible.
860
875
861 Some requirements may not be compatible with others or require
876 Some requirements may not be compatible with others or require
862 config options that aren't enabled. This function is called during
877 config options that aren't enabled. This function is called during
863 repository opening to ensure that the set of requirements needed
878 repository opening to ensure that the set of requirements needed
864 to open a repository is sane and compatible with config options.
879 to open a repository is sane and compatible with config options.
865
880
866 Extensions can monkeypatch this function to perform additional
881 Extensions can monkeypatch this function to perform additional
867 checking.
882 checking.
868
883
869 ``error.RepoError`` should be raised on failure.
884 ``error.RepoError`` should be raised on failure.
870 """
885 """
871 if (
886 if (
872 requirementsmod.SPARSE_REQUIREMENT in requirements
887 requirementsmod.SPARSE_REQUIREMENT in requirements
873 and not sparse.enabled
888 and not sparse.enabled
874 ):
889 ):
875 raise error.RepoError(
890 raise error.RepoError(
876 _(
891 _(
877 b'repository is using sparse feature but '
892 b'repository is using sparse feature but '
878 b'sparse is not enabled; enable the '
893 b'sparse is not enabled; enable the '
879 b'"sparse" extensions to access'
894 b'"sparse" extensions to access'
880 )
895 )
881 )
896 )
882
897
883
898
884 def makestore(requirements, path, vfstype):
899 def makestore(requirements, path, vfstype):
885 """Construct a storage object for a repository."""
900 """Construct a storage object for a repository."""
886 if b'store' in requirements:
901 if b'store' in requirements:
887 if b'fncache' in requirements:
902 if b'fncache' in requirements:
888 return storemod.fncachestore(
903 return storemod.fncachestore(
889 path, vfstype, b'dotencode' in requirements
904 path, vfstype, b'dotencode' in requirements
890 )
905 )
891
906
892 return storemod.encodedstore(path, vfstype)
907 return storemod.encodedstore(path, vfstype)
893
908
894 return storemod.basicstore(path, vfstype)
909 return storemod.basicstore(path, vfstype)
895
910
896
911
897 def resolvestorevfsoptions(ui, requirements, features):
912 def resolvestorevfsoptions(ui, requirements, features):
898 """Resolve the options to pass to the store vfs opener.
913 """Resolve the options to pass to the store vfs opener.
899
914
900 The returned dict is used to influence behavior of the storage layer.
915 The returned dict is used to influence behavior of the storage layer.
901 """
916 """
902 options = {}
917 options = {}
903
918
904 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
919 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
905 options[b'treemanifest'] = True
920 options[b'treemanifest'] = True
906
921
907 # experimental config: format.manifestcachesize
922 # experimental config: format.manifestcachesize
908 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
923 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
909 if manifestcachesize is not None:
924 if manifestcachesize is not None:
910 options[b'manifestcachesize'] = manifestcachesize
925 options[b'manifestcachesize'] = manifestcachesize
911
926
912 # In the absence of another requirement superseding a revlog-related
927 # In the absence of another requirement superseding a revlog-related
913 # requirement, we have to assume the repo is using revlog version 0.
928 # requirement, we have to assume the repo is using revlog version 0.
914 # This revlog format is super old and we don't bother trying to parse
929 # This revlog format is super old and we don't bother trying to parse
915 # opener options for it because those options wouldn't do anything
930 # opener options for it because those options wouldn't do anything
916 # meaningful on such old repos.
931 # meaningful on such old repos.
917 if (
932 if (
918 b'revlogv1' in requirements
933 b'revlogv1' in requirements
919 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
934 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
920 ):
935 ):
921 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
936 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
922 else: # explicitly mark repo as using revlogv0
937 else: # explicitly mark repo as using revlogv0
923 options[b'revlogv0'] = True
938 options[b'revlogv0'] = True
924
939
925 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
940 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
926 options[b'copies-storage'] = b'changeset-sidedata'
941 options[b'copies-storage'] = b'changeset-sidedata'
927 else:
942 else:
928 writecopiesto = ui.config(b'experimental', b'copies.write-to')
943 writecopiesto = ui.config(b'experimental', b'copies.write-to')
929 copiesextramode = (b'changeset-only', b'compatibility')
944 copiesextramode = (b'changeset-only', b'compatibility')
930 if writecopiesto in copiesextramode:
945 if writecopiesto in copiesextramode:
931 options[b'copies-storage'] = b'extra'
946 options[b'copies-storage'] = b'extra'
932
947
933 return options
948 return options
934
949
935
950
936 def resolverevlogstorevfsoptions(ui, requirements, features):
951 def resolverevlogstorevfsoptions(ui, requirements, features):
937 """Resolve opener options specific to revlogs."""
952 """Resolve opener options specific to revlogs."""
938
953
939 options = {}
954 options = {}
940 options[b'flagprocessors'] = {}
955 options[b'flagprocessors'] = {}
941
956
942 if b'revlogv1' in requirements:
957 if b'revlogv1' in requirements:
943 options[b'revlogv1'] = True
958 options[b'revlogv1'] = True
944 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
959 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
945 options[b'revlogv2'] = True
960 options[b'revlogv2'] = True
946
961
947 if b'generaldelta' in requirements:
962 if b'generaldelta' in requirements:
948 options[b'generaldelta'] = True
963 options[b'generaldelta'] = True
949
964
950 # experimental config: format.chunkcachesize
965 # experimental config: format.chunkcachesize
951 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
966 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
952 if chunkcachesize is not None:
967 if chunkcachesize is not None:
953 options[b'chunkcachesize'] = chunkcachesize
968 options[b'chunkcachesize'] = chunkcachesize
954
969
955 deltabothparents = ui.configbool(
970 deltabothparents = ui.configbool(
956 b'storage', b'revlog.optimize-delta-parent-choice'
971 b'storage', b'revlog.optimize-delta-parent-choice'
957 )
972 )
958 options[b'deltabothparents'] = deltabothparents
973 options[b'deltabothparents'] = deltabothparents
959
974
960 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
975 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
961 lazydeltabase = False
976 lazydeltabase = False
962 if lazydelta:
977 if lazydelta:
963 lazydeltabase = ui.configbool(
978 lazydeltabase = ui.configbool(
964 b'storage', b'revlog.reuse-external-delta-parent'
979 b'storage', b'revlog.reuse-external-delta-parent'
965 )
980 )
966 if lazydeltabase is None:
981 if lazydeltabase is None:
967 lazydeltabase = not scmutil.gddeltaconfig(ui)
982 lazydeltabase = not scmutil.gddeltaconfig(ui)
968 options[b'lazydelta'] = lazydelta
983 options[b'lazydelta'] = lazydelta
969 options[b'lazydeltabase'] = lazydeltabase
984 options[b'lazydeltabase'] = lazydeltabase
970
985
971 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
986 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
972 if 0 <= chainspan:
987 if 0 <= chainspan:
973 options[b'maxdeltachainspan'] = chainspan
988 options[b'maxdeltachainspan'] = chainspan
974
989
975 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
990 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
976 if mmapindexthreshold is not None:
991 if mmapindexthreshold is not None:
977 options[b'mmapindexthreshold'] = mmapindexthreshold
992 options[b'mmapindexthreshold'] = mmapindexthreshold
978
993
979 withsparseread = ui.configbool(b'experimental', b'sparse-read')
994 withsparseread = ui.configbool(b'experimental', b'sparse-read')
980 srdensitythres = float(
995 srdensitythres = float(
981 ui.config(b'experimental', b'sparse-read.density-threshold')
996 ui.config(b'experimental', b'sparse-read.density-threshold')
982 )
997 )
983 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
998 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
984 options[b'with-sparse-read'] = withsparseread
999 options[b'with-sparse-read'] = withsparseread
985 options[b'sparse-read-density-threshold'] = srdensitythres
1000 options[b'sparse-read-density-threshold'] = srdensitythres
986 options[b'sparse-read-min-gap-size'] = srmingapsize
1001 options[b'sparse-read-min-gap-size'] = srmingapsize
987
1002
988 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1003 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
989 options[b'sparse-revlog'] = sparserevlog
1004 options[b'sparse-revlog'] = sparserevlog
990 if sparserevlog:
1005 if sparserevlog:
991 options[b'generaldelta'] = True
1006 options[b'generaldelta'] = True
992
1007
993 sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
1008 sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
994 options[b'side-data'] = sidedata
1009 options[b'side-data'] = sidedata
995
1010
996 maxchainlen = None
1011 maxchainlen = None
997 if sparserevlog:
1012 if sparserevlog:
998 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1013 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
999 # experimental config: format.maxchainlen
1014 # experimental config: format.maxchainlen
1000 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1015 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1001 if maxchainlen is not None:
1016 if maxchainlen is not None:
1002 options[b'maxchainlen'] = maxchainlen
1017 options[b'maxchainlen'] = maxchainlen
1003
1018
1004 for r in requirements:
1019 for r in requirements:
1005 # we allow multiple compression engine requirement to co-exist because
1020 # we allow multiple compression engine requirement to co-exist because
1006 # strickly speaking, revlog seems to support mixed compression style.
1021 # strickly speaking, revlog seems to support mixed compression style.
1007 #
1022 #
1008 # The compression used for new entries will be "the last one"
1023 # The compression used for new entries will be "the last one"
1009 prefix = r.startswith
1024 prefix = r.startswith
1010 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1025 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1011 options[b'compengine'] = r.split(b'-', 2)[2]
1026 options[b'compengine'] = r.split(b'-', 2)[2]
1012
1027
1013 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1028 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1014 if options[b'zlib.level'] is not None:
1029 if options[b'zlib.level'] is not None:
1015 if not (0 <= options[b'zlib.level'] <= 9):
1030 if not (0 <= options[b'zlib.level'] <= 9):
1016 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1031 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1017 raise error.Abort(msg % options[b'zlib.level'])
1032 raise error.Abort(msg % options[b'zlib.level'])
1018 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1033 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1019 if options[b'zstd.level'] is not None:
1034 if options[b'zstd.level'] is not None:
1020 if not (0 <= options[b'zstd.level'] <= 22):
1035 if not (0 <= options[b'zstd.level'] <= 22):
1021 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1036 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1022 raise error.Abort(msg % options[b'zstd.level'])
1037 raise error.Abort(msg % options[b'zstd.level'])
1023
1038
1024 if requirementsmod.NARROW_REQUIREMENT in requirements:
1039 if requirementsmod.NARROW_REQUIREMENT in requirements:
1025 options[b'enableellipsis'] = True
1040 options[b'enableellipsis'] = True
1026
1041
1027 if ui.configbool(b'experimental', b'rust.index'):
1042 if ui.configbool(b'experimental', b'rust.index'):
1028 options[b'rust.index'] = True
1043 options[b'rust.index'] = True
1029 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1044 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1030 options[b'persistent-nodemap'] = True
1045 options[b'persistent-nodemap'] = True
1031 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
1046 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
1032 options[b'persistent-nodemap.mmap'] = True
1047 options[b'persistent-nodemap.mmap'] = True
1033 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
1048 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
1034 options[b'persistent-nodemap.mode'] = epnm
1049 options[b'persistent-nodemap.mode'] = epnm
1035 if ui.configbool(b'devel', b'persistent-nodemap'):
1050 if ui.configbool(b'devel', b'persistent-nodemap'):
1036 options[b'devel-force-nodemap'] = True
1051 options[b'devel-force-nodemap'] = True
1037
1052
1038 return options
1053 return options
1039
1054
1040
1055
1041 def makemain(**kwargs):
1056 def makemain(**kwargs):
1042 """Produce a type conforming to ``ilocalrepositorymain``."""
1057 """Produce a type conforming to ``ilocalrepositorymain``."""
1043 return localrepository
1058 return localrepository
1044
1059
1045
1060
1046 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1061 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1047 class revlogfilestorage(object):
1062 class revlogfilestorage(object):
1048 """File storage when using revlogs."""
1063 """File storage when using revlogs."""
1049
1064
1050 def file(self, path):
1065 def file(self, path):
1051 if path[0] == b'/':
1066 if path[0] == b'/':
1052 path = path[1:]
1067 path = path[1:]
1053
1068
1054 return filelog.filelog(self.svfs, path)
1069 return filelog.filelog(self.svfs, path)
1055
1070
1056
1071
1057 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1072 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1058 class revlognarrowfilestorage(object):
1073 class revlognarrowfilestorage(object):
1059 """File storage when using revlogs and narrow files."""
1074 """File storage when using revlogs and narrow files."""
1060
1075
1061 def file(self, path):
1076 def file(self, path):
1062 if path[0] == b'/':
1077 if path[0] == b'/':
1063 path = path[1:]
1078 path = path[1:]
1064
1079
1065 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1080 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1066
1081
1067
1082
1068 def makefilestorage(requirements, features, **kwargs):
1083 def makefilestorage(requirements, features, **kwargs):
1069 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1084 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1070 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1085 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1071 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1086 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1072
1087
1073 if requirementsmod.NARROW_REQUIREMENT in requirements:
1088 if requirementsmod.NARROW_REQUIREMENT in requirements:
1074 return revlognarrowfilestorage
1089 return revlognarrowfilestorage
1075 else:
1090 else:
1076 return revlogfilestorage
1091 return revlogfilestorage
1077
1092
1078
1093
1079 # List of repository interfaces and factory functions for them. Each
1094 # List of repository interfaces and factory functions for them. Each
1080 # will be called in order during ``makelocalrepository()`` to iteratively
1095 # will be called in order during ``makelocalrepository()`` to iteratively
1081 # derive the final type for a local repository instance. We capture the
1096 # derive the final type for a local repository instance. We capture the
1082 # function as a lambda so we don't hold a reference and the module-level
1097 # function as a lambda so we don't hold a reference and the module-level
1083 # functions can be wrapped.
1098 # functions can be wrapped.
1084 REPO_INTERFACES = [
1099 REPO_INTERFACES = [
1085 (repository.ilocalrepositorymain, lambda: makemain),
1100 (repository.ilocalrepositorymain, lambda: makemain),
1086 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1101 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1087 ]
1102 ]
1088
1103
1089
1104
1090 @interfaceutil.implementer(repository.ilocalrepositorymain)
1105 @interfaceutil.implementer(repository.ilocalrepositorymain)
1091 class localrepository(object):
1106 class localrepository(object):
1092 """Main class for representing local repositories.
1107 """Main class for representing local repositories.
1093
1108
1094 All local repositories are instances of this class.
1109 All local repositories are instances of this class.
1095
1110
1096 Constructed on its own, instances of this class are not usable as
1111 Constructed on its own, instances of this class are not usable as
1097 repository objects. To obtain a usable repository object, call
1112 repository objects. To obtain a usable repository object, call
1098 ``hg.repository()``, ``localrepo.instance()``, or
1113 ``hg.repository()``, ``localrepo.instance()``, or
1099 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1114 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1100 ``instance()`` adds support for creating new repositories.
1115 ``instance()`` adds support for creating new repositories.
1101 ``hg.repository()`` adds more extension integration, including calling
1116 ``hg.repository()`` adds more extension integration, including calling
1102 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1117 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1103 used.
1118 used.
1104 """
1119 """
1105
1120
1106 # obsolete experimental requirements:
1121 # obsolete experimental requirements:
1107 # - manifestv2: An experimental new manifest format that allowed
1122 # - manifestv2: An experimental new manifest format that allowed
1108 # for stem compression of long paths. Experiment ended up not
1123 # for stem compression of long paths. Experiment ended up not
1109 # being successful (repository sizes went up due to worse delta
1124 # being successful (repository sizes went up due to worse delta
1110 # chains), and the code was deleted in 4.6.
1125 # chains), and the code was deleted in 4.6.
1111 supportedformats = {
1126 supportedformats = {
1112 b'revlogv1',
1127 b'revlogv1',
1113 b'generaldelta',
1128 b'generaldelta',
1114 requirementsmod.TREEMANIFEST_REQUIREMENT,
1129 requirementsmod.TREEMANIFEST_REQUIREMENT,
1115 requirementsmod.COPIESSDC_REQUIREMENT,
1130 requirementsmod.COPIESSDC_REQUIREMENT,
1116 requirementsmod.REVLOGV2_REQUIREMENT,
1131 requirementsmod.REVLOGV2_REQUIREMENT,
1117 requirementsmod.SIDEDATA_REQUIREMENT,
1132 requirementsmod.SIDEDATA_REQUIREMENT,
1118 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1133 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1119 requirementsmod.NODEMAP_REQUIREMENT,
1134 requirementsmod.NODEMAP_REQUIREMENT,
1120 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1135 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1121 requirementsmod.SHARESAFE_REQUIREMENT,
1136 requirementsmod.SHARESAFE_REQUIREMENT,
1122 }
1137 }
1123 _basesupported = supportedformats | {
1138 _basesupported = supportedformats | {
1124 b'store',
1139 b'store',
1125 b'fncache',
1140 b'fncache',
1126 requirementsmod.SHARED_REQUIREMENT,
1141 requirementsmod.SHARED_REQUIREMENT,
1127 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1142 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1128 b'dotencode',
1143 b'dotencode',
1129 requirementsmod.SPARSE_REQUIREMENT,
1144 requirementsmod.SPARSE_REQUIREMENT,
1130 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1145 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1131 }
1146 }
1132
1147
1133 # list of prefix for file which can be written without 'wlock'
1148 # list of prefix for file which can be written without 'wlock'
1134 # Extensions should extend this list when needed
1149 # Extensions should extend this list when needed
1135 _wlockfreeprefix = {
1150 _wlockfreeprefix = {
1136 # We migh consider requiring 'wlock' for the next
1151 # We migh consider requiring 'wlock' for the next
1137 # two, but pretty much all the existing code assume
1152 # two, but pretty much all the existing code assume
1138 # wlock is not needed so we keep them excluded for
1153 # wlock is not needed so we keep them excluded for
1139 # now.
1154 # now.
1140 b'hgrc',
1155 b'hgrc',
1141 b'requires',
1156 b'requires',
1142 # XXX cache is a complicatged business someone
1157 # XXX cache is a complicatged business someone
1143 # should investigate this in depth at some point
1158 # should investigate this in depth at some point
1144 b'cache/',
1159 b'cache/',
1145 # XXX shouldn't be dirstate covered by the wlock?
1160 # XXX shouldn't be dirstate covered by the wlock?
1146 b'dirstate',
1161 b'dirstate',
1147 # XXX bisect was still a bit too messy at the time
1162 # XXX bisect was still a bit too messy at the time
1148 # this changeset was introduced. Someone should fix
1163 # this changeset was introduced. Someone should fix
1149 # the remainig bit and drop this line
1164 # the remainig bit and drop this line
1150 b'bisect.state',
1165 b'bisect.state',
1151 }
1166 }
1152
1167
1153 def __init__(
1168 def __init__(
1154 self,
1169 self,
1155 baseui,
1170 baseui,
1156 ui,
1171 ui,
1157 origroot,
1172 origroot,
1158 wdirvfs,
1173 wdirvfs,
1159 hgvfs,
1174 hgvfs,
1160 requirements,
1175 requirements,
1161 supportedrequirements,
1176 supportedrequirements,
1162 sharedpath,
1177 sharedpath,
1163 store,
1178 store,
1164 cachevfs,
1179 cachevfs,
1165 wcachevfs,
1180 wcachevfs,
1166 features,
1181 features,
1167 intents=None,
1182 intents=None,
1168 ):
1183 ):
1169 """Create a new local repository instance.
1184 """Create a new local repository instance.
1170
1185
1171 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1186 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1172 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1187 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1173 object.
1188 object.
1174
1189
1175 Arguments:
1190 Arguments:
1176
1191
1177 baseui
1192 baseui
1178 ``ui.ui`` instance that ``ui`` argument was based off of.
1193 ``ui.ui`` instance that ``ui`` argument was based off of.
1179
1194
1180 ui
1195 ui
1181 ``ui.ui`` instance for use by the repository.
1196 ``ui.ui`` instance for use by the repository.
1182
1197
1183 origroot
1198 origroot
1184 ``bytes`` path to working directory root of this repository.
1199 ``bytes`` path to working directory root of this repository.
1185
1200
1186 wdirvfs
1201 wdirvfs
1187 ``vfs.vfs`` rooted at the working directory.
1202 ``vfs.vfs`` rooted at the working directory.
1188
1203
1189 hgvfs
1204 hgvfs
1190 ``vfs.vfs`` rooted at .hg/
1205 ``vfs.vfs`` rooted at .hg/
1191
1206
1192 requirements
1207 requirements
1193 ``set`` of bytestrings representing repository opening requirements.
1208 ``set`` of bytestrings representing repository opening requirements.
1194
1209
1195 supportedrequirements
1210 supportedrequirements
1196 ``set`` of bytestrings representing repository requirements that we
1211 ``set`` of bytestrings representing repository requirements that we
1197 know how to open. May be a supetset of ``requirements``.
1212 know how to open. May be a supetset of ``requirements``.
1198
1213
1199 sharedpath
1214 sharedpath
1200 ``bytes`` Defining path to storage base directory. Points to a
1215 ``bytes`` Defining path to storage base directory. Points to a
1201 ``.hg/`` directory somewhere.
1216 ``.hg/`` directory somewhere.
1202
1217
1203 store
1218 store
1204 ``store.basicstore`` (or derived) instance providing access to
1219 ``store.basicstore`` (or derived) instance providing access to
1205 versioned storage.
1220 versioned storage.
1206
1221
1207 cachevfs
1222 cachevfs
1208 ``vfs.vfs`` used for cache files.
1223 ``vfs.vfs`` used for cache files.
1209
1224
1210 wcachevfs
1225 wcachevfs
1211 ``vfs.vfs`` used for cache files related to the working copy.
1226 ``vfs.vfs`` used for cache files related to the working copy.
1212
1227
1213 features
1228 features
1214 ``set`` of bytestrings defining features/capabilities of this
1229 ``set`` of bytestrings defining features/capabilities of this
1215 instance.
1230 instance.
1216
1231
1217 intents
1232 intents
1218 ``set`` of system strings indicating what this repo will be used
1233 ``set`` of system strings indicating what this repo will be used
1219 for.
1234 for.
1220 """
1235 """
1221 self.baseui = baseui
1236 self.baseui = baseui
1222 self.ui = ui
1237 self.ui = ui
1223 self.origroot = origroot
1238 self.origroot = origroot
1224 # vfs rooted at working directory.
1239 # vfs rooted at working directory.
1225 self.wvfs = wdirvfs
1240 self.wvfs = wdirvfs
1226 self.root = wdirvfs.base
1241 self.root = wdirvfs.base
1227 # vfs rooted at .hg/. Used to access most non-store paths.
1242 # vfs rooted at .hg/. Used to access most non-store paths.
1228 self.vfs = hgvfs
1243 self.vfs = hgvfs
1229 self.path = hgvfs.base
1244 self.path = hgvfs.base
1230 self.requirements = requirements
1245 self.requirements = requirements
1231 self.supported = supportedrequirements
1246 self.supported = supportedrequirements
1232 self.sharedpath = sharedpath
1247 self.sharedpath = sharedpath
1233 self.store = store
1248 self.store = store
1234 self.cachevfs = cachevfs
1249 self.cachevfs = cachevfs
1235 self.wcachevfs = wcachevfs
1250 self.wcachevfs = wcachevfs
1236 self.features = features
1251 self.features = features
1237
1252
1238 self.filtername = None
1253 self.filtername = None
1239
1254
1240 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1255 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1241 b'devel', b'check-locks'
1256 b'devel', b'check-locks'
1242 ):
1257 ):
1243 self.vfs.audit = self._getvfsward(self.vfs.audit)
1258 self.vfs.audit = self._getvfsward(self.vfs.audit)
1244 # A list of callback to shape the phase if no data were found.
1259 # A list of callback to shape the phase if no data were found.
1245 # Callback are in the form: func(repo, roots) --> processed root.
1260 # Callback are in the form: func(repo, roots) --> processed root.
1246 # This list it to be filled by extension during repo setup
1261 # This list it to be filled by extension during repo setup
1247 self._phasedefaults = []
1262 self._phasedefaults = []
1248
1263
1249 color.setup(self.ui)
1264 color.setup(self.ui)
1250
1265
1251 self.spath = self.store.path
1266 self.spath = self.store.path
1252 self.svfs = self.store.vfs
1267 self.svfs = self.store.vfs
1253 self.sjoin = self.store.join
1268 self.sjoin = self.store.join
1254 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1269 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1255 b'devel', b'check-locks'
1270 b'devel', b'check-locks'
1256 ):
1271 ):
1257 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1272 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1258 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1273 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1259 else: # standard vfs
1274 else: # standard vfs
1260 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1275 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1261
1276
1262 self._dirstatevalidatewarned = False
1277 self._dirstatevalidatewarned = False
1263
1278
1264 self._branchcaches = branchmap.BranchMapCache()
1279 self._branchcaches = branchmap.BranchMapCache()
1265 self._revbranchcache = None
1280 self._revbranchcache = None
1266 self._filterpats = {}
1281 self._filterpats = {}
1267 self._datafilters = {}
1282 self._datafilters = {}
1268 self._transref = self._lockref = self._wlockref = None
1283 self._transref = self._lockref = self._wlockref = None
1269
1284
1270 # A cache for various files under .hg/ that tracks file changes,
1285 # A cache for various files under .hg/ that tracks file changes,
1271 # (used by the filecache decorator)
1286 # (used by the filecache decorator)
1272 #
1287 #
1273 # Maps a property name to its util.filecacheentry
1288 # Maps a property name to its util.filecacheentry
1274 self._filecache = {}
1289 self._filecache = {}
1275
1290
1276 # hold sets of revision to be filtered
1291 # hold sets of revision to be filtered
1277 # should be cleared when something might have changed the filter value:
1292 # should be cleared when something might have changed the filter value:
1278 # - new changesets,
1293 # - new changesets,
1279 # - phase change,
1294 # - phase change,
1280 # - new obsolescence marker,
1295 # - new obsolescence marker,
1281 # - working directory parent change,
1296 # - working directory parent change,
1282 # - bookmark changes
1297 # - bookmark changes
1283 self.filteredrevcache = {}
1298 self.filteredrevcache = {}
1284
1299
1285 # post-dirstate-status hooks
1300 # post-dirstate-status hooks
1286 self._postdsstatus = []
1301 self._postdsstatus = []
1287
1302
1288 # generic mapping between names and nodes
1303 # generic mapping between names and nodes
1289 self.names = namespaces.namespaces()
1304 self.names = namespaces.namespaces()
1290
1305
1291 # Key to signature value.
1306 # Key to signature value.
1292 self._sparsesignaturecache = {}
1307 self._sparsesignaturecache = {}
1293 # Signature to cached matcher instance.
1308 # Signature to cached matcher instance.
1294 self._sparsematchercache = {}
1309 self._sparsematchercache = {}
1295
1310
1296 self._extrafilterid = repoview.extrafilter(ui)
1311 self._extrafilterid = repoview.extrafilter(ui)
1297
1312
1298 self.filecopiesmode = None
1313 self.filecopiesmode = None
1299 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1314 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1300 self.filecopiesmode = b'changeset-sidedata'
1315 self.filecopiesmode = b'changeset-sidedata'
1301
1316
1302 def _getvfsward(self, origfunc):
1317 def _getvfsward(self, origfunc):
1303 """build a ward for self.vfs"""
1318 """build a ward for self.vfs"""
1304 rref = weakref.ref(self)
1319 rref = weakref.ref(self)
1305
1320
1306 def checkvfs(path, mode=None):
1321 def checkvfs(path, mode=None):
1307 ret = origfunc(path, mode=mode)
1322 ret = origfunc(path, mode=mode)
1308 repo = rref()
1323 repo = rref()
1309 if (
1324 if (
1310 repo is None
1325 repo is None
1311 or not util.safehasattr(repo, b'_wlockref')
1326 or not util.safehasattr(repo, b'_wlockref')
1312 or not util.safehasattr(repo, b'_lockref')
1327 or not util.safehasattr(repo, b'_lockref')
1313 ):
1328 ):
1314 return
1329 return
1315 if mode in (None, b'r', b'rb'):
1330 if mode in (None, b'r', b'rb'):
1316 return
1331 return
1317 if path.startswith(repo.path):
1332 if path.startswith(repo.path):
1318 # truncate name relative to the repository (.hg)
1333 # truncate name relative to the repository (.hg)
1319 path = path[len(repo.path) + 1 :]
1334 path = path[len(repo.path) + 1 :]
1320 if path.startswith(b'cache/'):
1335 if path.startswith(b'cache/'):
1321 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1336 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1322 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1337 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1323 # path prefixes covered by 'lock'
1338 # path prefixes covered by 'lock'
1324 vfs_path_prefixes = (
1339 vfs_path_prefixes = (
1325 b'journal.',
1340 b'journal.',
1326 b'undo.',
1341 b'undo.',
1327 b'strip-backup/',
1342 b'strip-backup/',
1328 b'cache/',
1343 b'cache/',
1329 )
1344 )
1330 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1345 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1331 if repo._currentlock(repo._lockref) is None:
1346 if repo._currentlock(repo._lockref) is None:
1332 repo.ui.develwarn(
1347 repo.ui.develwarn(
1333 b'write with no lock: "%s"' % path,
1348 b'write with no lock: "%s"' % path,
1334 stacklevel=3,
1349 stacklevel=3,
1335 config=b'check-locks',
1350 config=b'check-locks',
1336 )
1351 )
1337 elif repo._currentlock(repo._wlockref) is None:
1352 elif repo._currentlock(repo._wlockref) is None:
1338 # rest of vfs files are covered by 'wlock'
1353 # rest of vfs files are covered by 'wlock'
1339 #
1354 #
1340 # exclude special files
1355 # exclude special files
1341 for prefix in self._wlockfreeprefix:
1356 for prefix in self._wlockfreeprefix:
1342 if path.startswith(prefix):
1357 if path.startswith(prefix):
1343 return
1358 return
1344 repo.ui.develwarn(
1359 repo.ui.develwarn(
1345 b'write with no wlock: "%s"' % path,
1360 b'write with no wlock: "%s"' % path,
1346 stacklevel=3,
1361 stacklevel=3,
1347 config=b'check-locks',
1362 config=b'check-locks',
1348 )
1363 )
1349 return ret
1364 return ret
1350
1365
1351 return checkvfs
1366 return checkvfs
1352
1367
1353 def _getsvfsward(self, origfunc):
1368 def _getsvfsward(self, origfunc):
1354 """build a ward for self.svfs"""
1369 """build a ward for self.svfs"""
1355 rref = weakref.ref(self)
1370 rref = weakref.ref(self)
1356
1371
1357 def checksvfs(path, mode=None):
1372 def checksvfs(path, mode=None):
1358 ret = origfunc(path, mode=mode)
1373 ret = origfunc(path, mode=mode)
1359 repo = rref()
1374 repo = rref()
1360 if repo is None or not util.safehasattr(repo, b'_lockref'):
1375 if repo is None or not util.safehasattr(repo, b'_lockref'):
1361 return
1376 return
1362 if mode in (None, b'r', b'rb'):
1377 if mode in (None, b'r', b'rb'):
1363 return
1378 return
1364 if path.startswith(repo.sharedpath):
1379 if path.startswith(repo.sharedpath):
1365 # truncate name relative to the repository (.hg)
1380 # truncate name relative to the repository (.hg)
1366 path = path[len(repo.sharedpath) + 1 :]
1381 path = path[len(repo.sharedpath) + 1 :]
1367 if repo._currentlock(repo._lockref) is None:
1382 if repo._currentlock(repo._lockref) is None:
1368 repo.ui.develwarn(
1383 repo.ui.develwarn(
1369 b'write with no lock: "%s"' % path, stacklevel=4
1384 b'write with no lock: "%s"' % path, stacklevel=4
1370 )
1385 )
1371 return ret
1386 return ret
1372
1387
1373 return checksvfs
1388 return checksvfs
1374
1389
1375 def close(self):
1390 def close(self):
1376 self._writecaches()
1391 self._writecaches()
1377
1392
1378 def _writecaches(self):
1393 def _writecaches(self):
1379 if self._revbranchcache:
1394 if self._revbranchcache:
1380 self._revbranchcache.write()
1395 self._revbranchcache.write()
1381
1396
1382 def _restrictcapabilities(self, caps):
1397 def _restrictcapabilities(self, caps):
1383 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1398 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1384 caps = set(caps)
1399 caps = set(caps)
1385 capsblob = bundle2.encodecaps(
1400 capsblob = bundle2.encodecaps(
1386 bundle2.getrepocaps(self, role=b'client')
1401 bundle2.getrepocaps(self, role=b'client')
1387 )
1402 )
1388 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1403 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1389 return caps
1404 return caps
1390
1405
1391 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1406 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1392 # self -> auditor -> self._checknested -> self
1407 # self -> auditor -> self._checknested -> self
1393
1408
1394 @property
1409 @property
1395 def auditor(self):
1410 def auditor(self):
1396 # This is only used by context.workingctx.match in order to
1411 # This is only used by context.workingctx.match in order to
1397 # detect files in subrepos.
1412 # detect files in subrepos.
1398 return pathutil.pathauditor(self.root, callback=self._checknested)
1413 return pathutil.pathauditor(self.root, callback=self._checknested)
1399
1414
1400 @property
1415 @property
1401 def nofsauditor(self):
1416 def nofsauditor(self):
1402 # This is only used by context.basectx.match in order to detect
1417 # This is only used by context.basectx.match in order to detect
1403 # files in subrepos.
1418 # files in subrepos.
1404 return pathutil.pathauditor(
1419 return pathutil.pathauditor(
1405 self.root, callback=self._checknested, realfs=False, cached=True
1420 self.root, callback=self._checknested, realfs=False, cached=True
1406 )
1421 )
1407
1422
1408 def _checknested(self, path):
1423 def _checknested(self, path):
1409 """Determine if path is a legal nested repository."""
1424 """Determine if path is a legal nested repository."""
1410 if not path.startswith(self.root):
1425 if not path.startswith(self.root):
1411 return False
1426 return False
1412 subpath = path[len(self.root) + 1 :]
1427 subpath = path[len(self.root) + 1 :]
1413 normsubpath = util.pconvert(subpath)
1428 normsubpath = util.pconvert(subpath)
1414
1429
1415 # XXX: Checking against the current working copy is wrong in
1430 # XXX: Checking against the current working copy is wrong in
1416 # the sense that it can reject things like
1431 # the sense that it can reject things like
1417 #
1432 #
1418 # $ hg cat -r 10 sub/x.txt
1433 # $ hg cat -r 10 sub/x.txt
1419 #
1434 #
1420 # if sub/ is no longer a subrepository in the working copy
1435 # if sub/ is no longer a subrepository in the working copy
1421 # parent revision.
1436 # parent revision.
1422 #
1437 #
1423 # However, it can of course also allow things that would have
1438 # However, it can of course also allow things that would have
1424 # been rejected before, such as the above cat command if sub/
1439 # been rejected before, such as the above cat command if sub/
1425 # is a subrepository now, but was a normal directory before.
1440 # is a subrepository now, but was a normal directory before.
1426 # The old path auditor would have rejected by mistake since it
1441 # The old path auditor would have rejected by mistake since it
1427 # panics when it sees sub/.hg/.
1442 # panics when it sees sub/.hg/.
1428 #
1443 #
1429 # All in all, checking against the working copy seems sensible
1444 # All in all, checking against the working copy seems sensible
1430 # since we want to prevent access to nested repositories on
1445 # since we want to prevent access to nested repositories on
1431 # the filesystem *now*.
1446 # the filesystem *now*.
1432 ctx = self[None]
1447 ctx = self[None]
1433 parts = util.splitpath(subpath)
1448 parts = util.splitpath(subpath)
1434 while parts:
1449 while parts:
1435 prefix = b'/'.join(parts)
1450 prefix = b'/'.join(parts)
1436 if prefix in ctx.substate:
1451 if prefix in ctx.substate:
1437 if prefix == normsubpath:
1452 if prefix == normsubpath:
1438 return True
1453 return True
1439 else:
1454 else:
1440 sub = ctx.sub(prefix)
1455 sub = ctx.sub(prefix)
1441 return sub.checknested(subpath[len(prefix) + 1 :])
1456 return sub.checknested(subpath[len(prefix) + 1 :])
1442 else:
1457 else:
1443 parts.pop()
1458 parts.pop()
1444 return False
1459 return False
1445
1460
1446 def peer(self):
1461 def peer(self):
1447 return localpeer(self) # not cached to avoid reference cycle
1462 return localpeer(self) # not cached to avoid reference cycle
1448
1463
1449 def unfiltered(self):
1464 def unfiltered(self):
1450 """Return unfiltered version of the repository
1465 """Return unfiltered version of the repository
1451
1466
1452 Intended to be overwritten by filtered repo."""
1467 Intended to be overwritten by filtered repo."""
1453 return self
1468 return self
1454
1469
1455 def filtered(self, name, visibilityexceptions=None):
1470 def filtered(self, name, visibilityexceptions=None):
1456 """Return a filtered version of a repository
1471 """Return a filtered version of a repository
1457
1472
1458 The `name` parameter is the identifier of the requested view. This
1473 The `name` parameter is the identifier of the requested view. This
1459 will return a repoview object set "exactly" to the specified view.
1474 will return a repoview object set "exactly" to the specified view.
1460
1475
1461 This function does not apply recursive filtering to a repository. For
1476 This function does not apply recursive filtering to a repository. For
1462 example calling `repo.filtered("served")` will return a repoview using
1477 example calling `repo.filtered("served")` will return a repoview using
1463 the "served" view, regardless of the initial view used by `repo`.
1478 the "served" view, regardless of the initial view used by `repo`.
1464
1479
1465 In other word, there is always only one level of `repoview` "filtering".
1480 In other word, there is always only one level of `repoview` "filtering".
1466 """
1481 """
1467 if self._extrafilterid is not None and b'%' not in name:
1482 if self._extrafilterid is not None and b'%' not in name:
1468 name = name + b'%' + self._extrafilterid
1483 name = name + b'%' + self._extrafilterid
1469
1484
1470 cls = repoview.newtype(self.unfiltered().__class__)
1485 cls = repoview.newtype(self.unfiltered().__class__)
1471 return cls(self, name, visibilityexceptions)
1486 return cls(self, name, visibilityexceptions)
1472
1487
1473 @mixedrepostorecache(
1488 @mixedrepostorecache(
1474 (b'bookmarks', b'plain'),
1489 (b'bookmarks', b'plain'),
1475 (b'bookmarks.current', b'plain'),
1490 (b'bookmarks.current', b'plain'),
1476 (b'bookmarks', b''),
1491 (b'bookmarks', b''),
1477 (b'00changelog.i', b''),
1492 (b'00changelog.i', b''),
1478 )
1493 )
1479 def _bookmarks(self):
1494 def _bookmarks(self):
1480 # Since the multiple files involved in the transaction cannot be
1495 # Since the multiple files involved in the transaction cannot be
1481 # written atomically (with current repository format), there is a race
1496 # written atomically (with current repository format), there is a race
1482 # condition here.
1497 # condition here.
1483 #
1498 #
1484 # 1) changelog content A is read
1499 # 1) changelog content A is read
1485 # 2) outside transaction update changelog to content B
1500 # 2) outside transaction update changelog to content B
1486 # 3) outside transaction update bookmark file referring to content B
1501 # 3) outside transaction update bookmark file referring to content B
1487 # 4) bookmarks file content is read and filtered against changelog-A
1502 # 4) bookmarks file content is read and filtered against changelog-A
1488 #
1503 #
1489 # When this happens, bookmarks against nodes missing from A are dropped.
1504 # When this happens, bookmarks against nodes missing from A are dropped.
1490 #
1505 #
1491 # Having this happening during read is not great, but it become worse
1506 # Having this happening during read is not great, but it become worse
1492 # when this happen during write because the bookmarks to the "unknown"
1507 # when this happen during write because the bookmarks to the "unknown"
1493 # nodes will be dropped for good. However, writes happen within locks.
1508 # nodes will be dropped for good. However, writes happen within locks.
1494 # This locking makes it possible to have a race free consistent read.
1509 # This locking makes it possible to have a race free consistent read.
1495 # For this purpose data read from disc before locking are
1510 # For this purpose data read from disc before locking are
1496 # "invalidated" right after the locks are taken. This invalidations are
1511 # "invalidated" right after the locks are taken. This invalidations are
1497 # "light", the `filecache` mechanism keep the data in memory and will
1512 # "light", the `filecache` mechanism keep the data in memory and will
1498 # reuse them if the underlying files did not changed. Not parsing the
1513 # reuse them if the underlying files did not changed. Not parsing the
1499 # same data multiple times helps performances.
1514 # same data multiple times helps performances.
1500 #
1515 #
1501 # Unfortunately in the case describe above, the files tracked by the
1516 # Unfortunately in the case describe above, the files tracked by the
1502 # bookmarks file cache might not have changed, but the in-memory
1517 # bookmarks file cache might not have changed, but the in-memory
1503 # content is still "wrong" because we used an older changelog content
1518 # content is still "wrong" because we used an older changelog content
1504 # to process the on-disk data. So after locking, the changelog would be
1519 # to process the on-disk data. So after locking, the changelog would be
1505 # refreshed but `_bookmarks` would be preserved.
1520 # refreshed but `_bookmarks` would be preserved.
1506 # Adding `00changelog.i` to the list of tracked file is not
1521 # Adding `00changelog.i` to the list of tracked file is not
1507 # enough, because at the time we build the content for `_bookmarks` in
1522 # enough, because at the time we build the content for `_bookmarks` in
1508 # (4), the changelog file has already diverged from the content used
1523 # (4), the changelog file has already diverged from the content used
1509 # for loading `changelog` in (1)
1524 # for loading `changelog` in (1)
1510 #
1525 #
1511 # To prevent the issue, we force the changelog to be explicitly
1526 # To prevent the issue, we force the changelog to be explicitly
1512 # reloaded while computing `_bookmarks`. The data race can still happen
1527 # reloaded while computing `_bookmarks`. The data race can still happen
1513 # without the lock (with a narrower window), but it would no longer go
1528 # without the lock (with a narrower window), but it would no longer go
1514 # undetected during the lock time refresh.
1529 # undetected during the lock time refresh.
1515 #
1530 #
1516 # The new schedule is as follow
1531 # The new schedule is as follow
1517 #
1532 #
1518 # 1) filecache logic detect that `_bookmarks` needs to be computed
1533 # 1) filecache logic detect that `_bookmarks` needs to be computed
1519 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1534 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1520 # 3) We force `changelog` filecache to be tested
1535 # 3) We force `changelog` filecache to be tested
1521 # 4) cachestat for `changelog` are captured (for changelog)
1536 # 4) cachestat for `changelog` are captured (for changelog)
1522 # 5) `_bookmarks` is computed and cached
1537 # 5) `_bookmarks` is computed and cached
1523 #
1538 #
1524 # The step in (3) ensure we have a changelog at least as recent as the
1539 # The step in (3) ensure we have a changelog at least as recent as the
1525 # cache stat computed in (1). As a result at locking time:
1540 # cache stat computed in (1). As a result at locking time:
1526 # * if the changelog did not changed since (1) -> we can reuse the data
1541 # * if the changelog did not changed since (1) -> we can reuse the data
1527 # * otherwise -> the bookmarks get refreshed.
1542 # * otherwise -> the bookmarks get refreshed.
1528 self._refreshchangelog()
1543 self._refreshchangelog()
1529 return bookmarks.bmstore(self)
1544 return bookmarks.bmstore(self)
1530
1545
1531 def _refreshchangelog(self):
1546 def _refreshchangelog(self):
1532 """make sure the in memory changelog match the on-disk one"""
1547 """make sure the in memory changelog match the on-disk one"""
1533 if 'changelog' in vars(self) and self.currenttransaction() is None:
1548 if 'changelog' in vars(self) and self.currenttransaction() is None:
1534 del self.changelog
1549 del self.changelog
1535
1550
1536 @property
1551 @property
1537 def _activebookmark(self):
1552 def _activebookmark(self):
1538 return self._bookmarks.active
1553 return self._bookmarks.active
1539
1554
1540 # _phasesets depend on changelog. what we need is to call
1555 # _phasesets depend on changelog. what we need is to call
1541 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1556 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1542 # can't be easily expressed in filecache mechanism.
1557 # can't be easily expressed in filecache mechanism.
1543 @storecache(b'phaseroots', b'00changelog.i')
1558 @storecache(b'phaseroots', b'00changelog.i')
1544 def _phasecache(self):
1559 def _phasecache(self):
1545 return phases.phasecache(self, self._phasedefaults)
1560 return phases.phasecache(self, self._phasedefaults)
1546
1561
1547 @storecache(b'obsstore')
1562 @storecache(b'obsstore')
1548 def obsstore(self):
1563 def obsstore(self):
1549 return obsolete.makestore(self.ui, self)
1564 return obsolete.makestore(self.ui, self)
1550
1565
1551 @storecache(b'00changelog.i')
1566 @storecache(b'00changelog.i')
1552 def changelog(self):
1567 def changelog(self):
1553 # load dirstate before changelog to avoid race see issue6303
1568 # load dirstate before changelog to avoid race see issue6303
1554 self.dirstate.prefetch_parents()
1569 self.dirstate.prefetch_parents()
1555 return self.store.changelog(txnutil.mayhavepending(self.root))
1570 return self.store.changelog(txnutil.mayhavepending(self.root))
1556
1571
1557 @storecache(b'00manifest.i')
1572 @storecache(b'00manifest.i')
1558 def manifestlog(self):
1573 def manifestlog(self):
1559 return self.store.manifestlog(self, self._storenarrowmatch)
1574 return self.store.manifestlog(self, self._storenarrowmatch)
1560
1575
1561 @repofilecache(b'dirstate')
1576 @repofilecache(b'dirstate')
1562 def dirstate(self):
1577 def dirstate(self):
1563 return self._makedirstate()
1578 return self._makedirstate()
1564
1579
1565 def _makedirstate(self):
1580 def _makedirstate(self):
1566 """Extension point for wrapping the dirstate per-repo."""
1581 """Extension point for wrapping the dirstate per-repo."""
1567 sparsematchfn = lambda: sparse.matcher(self)
1582 sparsematchfn = lambda: sparse.matcher(self)
1568
1583
1569 return dirstate.dirstate(
1584 return dirstate.dirstate(
1570 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1585 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1571 )
1586 )
1572
1587
1573 def _dirstatevalidate(self, node):
1588 def _dirstatevalidate(self, node):
1574 try:
1589 try:
1575 self.changelog.rev(node)
1590 self.changelog.rev(node)
1576 return node
1591 return node
1577 except error.LookupError:
1592 except error.LookupError:
1578 if not self._dirstatevalidatewarned:
1593 if not self._dirstatevalidatewarned:
1579 self._dirstatevalidatewarned = True
1594 self._dirstatevalidatewarned = True
1580 self.ui.warn(
1595 self.ui.warn(
1581 _(b"warning: ignoring unknown working parent %s!\n")
1596 _(b"warning: ignoring unknown working parent %s!\n")
1582 % short(node)
1597 % short(node)
1583 )
1598 )
1584 return nullid
1599 return nullid
1585
1600
1586 @storecache(narrowspec.FILENAME)
1601 @storecache(narrowspec.FILENAME)
1587 def narrowpats(self):
1602 def narrowpats(self):
1588 """matcher patterns for this repository's narrowspec
1603 """matcher patterns for this repository's narrowspec
1589
1604
1590 A tuple of (includes, excludes).
1605 A tuple of (includes, excludes).
1591 """
1606 """
1592 return narrowspec.load(self)
1607 return narrowspec.load(self)
1593
1608
1594 @storecache(narrowspec.FILENAME)
1609 @storecache(narrowspec.FILENAME)
1595 def _storenarrowmatch(self):
1610 def _storenarrowmatch(self):
1596 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1611 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1597 return matchmod.always()
1612 return matchmod.always()
1598 include, exclude = self.narrowpats
1613 include, exclude = self.narrowpats
1599 return narrowspec.match(self.root, include=include, exclude=exclude)
1614 return narrowspec.match(self.root, include=include, exclude=exclude)
1600
1615
1601 @storecache(narrowspec.FILENAME)
1616 @storecache(narrowspec.FILENAME)
1602 def _narrowmatch(self):
1617 def _narrowmatch(self):
1603 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1618 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1604 return matchmod.always()
1619 return matchmod.always()
1605 narrowspec.checkworkingcopynarrowspec(self)
1620 narrowspec.checkworkingcopynarrowspec(self)
1606 include, exclude = self.narrowpats
1621 include, exclude = self.narrowpats
1607 return narrowspec.match(self.root, include=include, exclude=exclude)
1622 return narrowspec.match(self.root, include=include, exclude=exclude)
1608
1623
1609 def narrowmatch(self, match=None, includeexact=False):
1624 def narrowmatch(self, match=None, includeexact=False):
1610 """matcher corresponding the the repo's narrowspec
1625 """matcher corresponding the the repo's narrowspec
1611
1626
1612 If `match` is given, then that will be intersected with the narrow
1627 If `match` is given, then that will be intersected with the narrow
1613 matcher.
1628 matcher.
1614
1629
1615 If `includeexact` is True, then any exact matches from `match` will
1630 If `includeexact` is True, then any exact matches from `match` will
1616 be included even if they're outside the narrowspec.
1631 be included even if they're outside the narrowspec.
1617 """
1632 """
1618 if match:
1633 if match:
1619 if includeexact and not self._narrowmatch.always():
1634 if includeexact and not self._narrowmatch.always():
1620 # do not exclude explicitly-specified paths so that they can
1635 # do not exclude explicitly-specified paths so that they can
1621 # be warned later on
1636 # be warned later on
1622 em = matchmod.exact(match.files())
1637 em = matchmod.exact(match.files())
1623 nm = matchmod.unionmatcher([self._narrowmatch, em])
1638 nm = matchmod.unionmatcher([self._narrowmatch, em])
1624 return matchmod.intersectmatchers(match, nm)
1639 return matchmod.intersectmatchers(match, nm)
1625 return matchmod.intersectmatchers(match, self._narrowmatch)
1640 return matchmod.intersectmatchers(match, self._narrowmatch)
1626 return self._narrowmatch
1641 return self._narrowmatch
1627
1642
1628 def setnarrowpats(self, newincludes, newexcludes):
1643 def setnarrowpats(self, newincludes, newexcludes):
1629 narrowspec.save(self, newincludes, newexcludes)
1644 narrowspec.save(self, newincludes, newexcludes)
1630 self.invalidate(clearfilecache=True)
1645 self.invalidate(clearfilecache=True)
1631
1646
1632 @unfilteredpropertycache
1647 @unfilteredpropertycache
1633 def _quick_access_changeid_null(self):
1648 def _quick_access_changeid_null(self):
1634 return {
1649 return {
1635 b'null': (nullrev, nullid),
1650 b'null': (nullrev, nullid),
1636 nullrev: (nullrev, nullid),
1651 nullrev: (nullrev, nullid),
1637 nullid: (nullrev, nullid),
1652 nullid: (nullrev, nullid),
1638 }
1653 }
1639
1654
1640 @unfilteredpropertycache
1655 @unfilteredpropertycache
1641 def _quick_access_changeid_wc(self):
1656 def _quick_access_changeid_wc(self):
1642 # also fast path access to the working copy parents
1657 # also fast path access to the working copy parents
1643 # however, only do it for filter that ensure wc is visible.
1658 # however, only do it for filter that ensure wc is visible.
1644 quick = self._quick_access_changeid_null.copy()
1659 quick = self._quick_access_changeid_null.copy()
1645 cl = self.unfiltered().changelog
1660 cl = self.unfiltered().changelog
1646 for node in self.dirstate.parents():
1661 for node in self.dirstate.parents():
1647 if node == nullid:
1662 if node == nullid:
1648 continue
1663 continue
1649 rev = cl.index.get_rev(node)
1664 rev = cl.index.get_rev(node)
1650 if rev is None:
1665 if rev is None:
1651 # unknown working copy parent case:
1666 # unknown working copy parent case:
1652 #
1667 #
1653 # skip the fast path and let higher code deal with it
1668 # skip the fast path and let higher code deal with it
1654 continue
1669 continue
1655 pair = (rev, node)
1670 pair = (rev, node)
1656 quick[rev] = pair
1671 quick[rev] = pair
1657 quick[node] = pair
1672 quick[node] = pair
1658 # also add the parents of the parents
1673 # also add the parents of the parents
1659 for r in cl.parentrevs(rev):
1674 for r in cl.parentrevs(rev):
1660 if r == nullrev:
1675 if r == nullrev:
1661 continue
1676 continue
1662 n = cl.node(r)
1677 n = cl.node(r)
1663 pair = (r, n)
1678 pair = (r, n)
1664 quick[r] = pair
1679 quick[r] = pair
1665 quick[n] = pair
1680 quick[n] = pair
1666 p1node = self.dirstate.p1()
1681 p1node = self.dirstate.p1()
1667 if p1node != nullid:
1682 if p1node != nullid:
1668 quick[b'.'] = quick[p1node]
1683 quick[b'.'] = quick[p1node]
1669 return quick
1684 return quick
1670
1685
1671 @unfilteredmethod
1686 @unfilteredmethod
1672 def _quick_access_changeid_invalidate(self):
1687 def _quick_access_changeid_invalidate(self):
1673 if '_quick_access_changeid_wc' in vars(self):
1688 if '_quick_access_changeid_wc' in vars(self):
1674 del self.__dict__['_quick_access_changeid_wc']
1689 del self.__dict__['_quick_access_changeid_wc']
1675
1690
1676 @property
1691 @property
1677 def _quick_access_changeid(self):
1692 def _quick_access_changeid(self):
1678 """an helper dictionnary for __getitem__ calls
1693 """an helper dictionnary for __getitem__ calls
1679
1694
1680 This contains a list of symbol we can recognise right away without
1695 This contains a list of symbol we can recognise right away without
1681 further processing.
1696 further processing.
1682 """
1697 """
1683 if self.filtername in repoview.filter_has_wc:
1698 if self.filtername in repoview.filter_has_wc:
1684 return self._quick_access_changeid_wc
1699 return self._quick_access_changeid_wc
1685 return self._quick_access_changeid_null
1700 return self._quick_access_changeid_null
1686
1701
1687 def __getitem__(self, changeid):
1702 def __getitem__(self, changeid):
1688 # dealing with special cases
1703 # dealing with special cases
1689 if changeid is None:
1704 if changeid is None:
1690 return context.workingctx(self)
1705 return context.workingctx(self)
1691 if isinstance(changeid, context.basectx):
1706 if isinstance(changeid, context.basectx):
1692 return changeid
1707 return changeid
1693
1708
1694 # dealing with multiple revisions
1709 # dealing with multiple revisions
1695 if isinstance(changeid, slice):
1710 if isinstance(changeid, slice):
1696 # wdirrev isn't contiguous so the slice shouldn't include it
1711 # wdirrev isn't contiguous so the slice shouldn't include it
1697 return [
1712 return [
1698 self[i]
1713 self[i]
1699 for i in pycompat.xrange(*changeid.indices(len(self)))
1714 for i in pycompat.xrange(*changeid.indices(len(self)))
1700 if i not in self.changelog.filteredrevs
1715 if i not in self.changelog.filteredrevs
1701 ]
1716 ]
1702
1717
1703 # dealing with some special values
1718 # dealing with some special values
1704 quick_access = self._quick_access_changeid.get(changeid)
1719 quick_access = self._quick_access_changeid.get(changeid)
1705 if quick_access is not None:
1720 if quick_access is not None:
1706 rev, node = quick_access
1721 rev, node = quick_access
1707 return context.changectx(self, rev, node, maybe_filtered=False)
1722 return context.changectx(self, rev, node, maybe_filtered=False)
1708 if changeid == b'tip':
1723 if changeid == b'tip':
1709 node = self.changelog.tip()
1724 node = self.changelog.tip()
1710 rev = self.changelog.rev(node)
1725 rev = self.changelog.rev(node)
1711 return context.changectx(self, rev, node)
1726 return context.changectx(self, rev, node)
1712
1727
1713 # dealing with arbitrary values
1728 # dealing with arbitrary values
1714 try:
1729 try:
1715 if isinstance(changeid, int):
1730 if isinstance(changeid, int):
1716 node = self.changelog.node(changeid)
1731 node = self.changelog.node(changeid)
1717 rev = changeid
1732 rev = changeid
1718 elif changeid == b'.':
1733 elif changeid == b'.':
1719 # this is a hack to delay/avoid loading obsmarkers
1734 # this is a hack to delay/avoid loading obsmarkers
1720 # when we know that '.' won't be hidden
1735 # when we know that '.' won't be hidden
1721 node = self.dirstate.p1()
1736 node = self.dirstate.p1()
1722 rev = self.unfiltered().changelog.rev(node)
1737 rev = self.unfiltered().changelog.rev(node)
1723 elif len(changeid) == 20:
1738 elif len(changeid) == 20:
1724 try:
1739 try:
1725 node = changeid
1740 node = changeid
1726 rev = self.changelog.rev(changeid)
1741 rev = self.changelog.rev(changeid)
1727 except error.FilteredLookupError:
1742 except error.FilteredLookupError:
1728 changeid = hex(changeid) # for the error message
1743 changeid = hex(changeid) # for the error message
1729 raise
1744 raise
1730 except LookupError:
1745 except LookupError:
1731 # check if it might have come from damaged dirstate
1746 # check if it might have come from damaged dirstate
1732 #
1747 #
1733 # XXX we could avoid the unfiltered if we had a recognizable
1748 # XXX we could avoid the unfiltered if we had a recognizable
1734 # exception for filtered changeset access
1749 # exception for filtered changeset access
1735 if (
1750 if (
1736 self.local()
1751 self.local()
1737 and changeid in self.unfiltered().dirstate.parents()
1752 and changeid in self.unfiltered().dirstate.parents()
1738 ):
1753 ):
1739 msg = _(b"working directory has unknown parent '%s'!")
1754 msg = _(b"working directory has unknown parent '%s'!")
1740 raise error.Abort(msg % short(changeid))
1755 raise error.Abort(msg % short(changeid))
1741 changeid = hex(changeid) # for the error message
1756 changeid = hex(changeid) # for the error message
1742 raise
1757 raise
1743
1758
1744 elif len(changeid) == 40:
1759 elif len(changeid) == 40:
1745 node = bin(changeid)
1760 node = bin(changeid)
1746 rev = self.changelog.rev(node)
1761 rev = self.changelog.rev(node)
1747 else:
1762 else:
1748 raise error.ProgrammingError(
1763 raise error.ProgrammingError(
1749 b"unsupported changeid '%s' of type %s"
1764 b"unsupported changeid '%s' of type %s"
1750 % (changeid, pycompat.bytestr(type(changeid)))
1765 % (changeid, pycompat.bytestr(type(changeid)))
1751 )
1766 )
1752
1767
1753 return context.changectx(self, rev, node)
1768 return context.changectx(self, rev, node)
1754
1769
1755 except (error.FilteredIndexError, error.FilteredLookupError):
1770 except (error.FilteredIndexError, error.FilteredLookupError):
1756 raise error.FilteredRepoLookupError(
1771 raise error.FilteredRepoLookupError(
1757 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1772 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1758 )
1773 )
1759 except (IndexError, LookupError):
1774 except (IndexError, LookupError):
1760 raise error.RepoLookupError(
1775 raise error.RepoLookupError(
1761 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1776 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1762 )
1777 )
1763 except error.WdirUnsupported:
1778 except error.WdirUnsupported:
1764 return context.workingctx(self)
1779 return context.workingctx(self)
1765
1780
1766 def __contains__(self, changeid):
1781 def __contains__(self, changeid):
1767 """True if the given changeid exists"""
1782 """True if the given changeid exists"""
1768 try:
1783 try:
1769 self[changeid]
1784 self[changeid]
1770 return True
1785 return True
1771 except error.RepoLookupError:
1786 except error.RepoLookupError:
1772 return False
1787 return False
1773
1788
1774 def __nonzero__(self):
1789 def __nonzero__(self):
1775 return True
1790 return True
1776
1791
1777 __bool__ = __nonzero__
1792 __bool__ = __nonzero__
1778
1793
1779 def __len__(self):
1794 def __len__(self):
1780 # no need to pay the cost of repoview.changelog
1795 # no need to pay the cost of repoview.changelog
1781 unfi = self.unfiltered()
1796 unfi = self.unfiltered()
1782 return len(unfi.changelog)
1797 return len(unfi.changelog)
1783
1798
1784 def __iter__(self):
1799 def __iter__(self):
1785 return iter(self.changelog)
1800 return iter(self.changelog)
1786
1801
1787 def revs(self, expr, *args):
1802 def revs(self, expr, *args):
1788 """Find revisions matching a revset.
1803 """Find revisions matching a revset.
1789
1804
1790 The revset is specified as a string ``expr`` that may contain
1805 The revset is specified as a string ``expr`` that may contain
1791 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1806 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1792
1807
1793 Revset aliases from the configuration are not expanded. To expand
1808 Revset aliases from the configuration are not expanded. To expand
1794 user aliases, consider calling ``scmutil.revrange()`` or
1809 user aliases, consider calling ``scmutil.revrange()`` or
1795 ``repo.anyrevs([expr], user=True)``.
1810 ``repo.anyrevs([expr], user=True)``.
1796
1811
1797 Returns a smartset.abstractsmartset, which is a list-like interface
1812 Returns a smartset.abstractsmartset, which is a list-like interface
1798 that contains integer revisions.
1813 that contains integer revisions.
1799 """
1814 """
1800 tree = revsetlang.spectree(expr, *args)
1815 tree = revsetlang.spectree(expr, *args)
1801 return revset.makematcher(tree)(self)
1816 return revset.makematcher(tree)(self)
1802
1817
1803 def set(self, expr, *args):
1818 def set(self, expr, *args):
1804 """Find revisions matching a revset and emit changectx instances.
1819 """Find revisions matching a revset and emit changectx instances.
1805
1820
1806 This is a convenience wrapper around ``revs()`` that iterates the
1821 This is a convenience wrapper around ``revs()`` that iterates the
1807 result and is a generator of changectx instances.
1822 result and is a generator of changectx instances.
1808
1823
1809 Revset aliases from the configuration are not expanded. To expand
1824 Revset aliases from the configuration are not expanded. To expand
1810 user aliases, consider calling ``scmutil.revrange()``.
1825 user aliases, consider calling ``scmutil.revrange()``.
1811 """
1826 """
1812 for r in self.revs(expr, *args):
1827 for r in self.revs(expr, *args):
1813 yield self[r]
1828 yield self[r]
1814
1829
1815 def anyrevs(self, specs, user=False, localalias=None):
1830 def anyrevs(self, specs, user=False, localalias=None):
1816 """Find revisions matching one of the given revsets.
1831 """Find revisions matching one of the given revsets.
1817
1832
1818 Revset aliases from the configuration are not expanded by default. To
1833 Revset aliases from the configuration are not expanded by default. To
1819 expand user aliases, specify ``user=True``. To provide some local
1834 expand user aliases, specify ``user=True``. To provide some local
1820 definitions overriding user aliases, set ``localalias`` to
1835 definitions overriding user aliases, set ``localalias`` to
1821 ``{name: definitionstring}``.
1836 ``{name: definitionstring}``.
1822 """
1837 """
1823 if specs == [b'null']:
1838 if specs == [b'null']:
1824 return revset.baseset([nullrev])
1839 return revset.baseset([nullrev])
1825 if specs == [b'.']:
1840 if specs == [b'.']:
1826 quick_data = self._quick_access_changeid.get(b'.')
1841 quick_data = self._quick_access_changeid.get(b'.')
1827 if quick_data is not None:
1842 if quick_data is not None:
1828 return revset.baseset([quick_data[0]])
1843 return revset.baseset([quick_data[0]])
1829 if user:
1844 if user:
1830 m = revset.matchany(
1845 m = revset.matchany(
1831 self.ui,
1846 self.ui,
1832 specs,
1847 specs,
1833 lookup=revset.lookupfn(self),
1848 lookup=revset.lookupfn(self),
1834 localalias=localalias,
1849 localalias=localalias,
1835 )
1850 )
1836 else:
1851 else:
1837 m = revset.matchany(None, specs, localalias=localalias)
1852 m = revset.matchany(None, specs, localalias=localalias)
1838 return m(self)
1853 return m(self)
1839
1854
1840 def url(self):
1855 def url(self):
1841 return b'file:' + self.root
1856 return b'file:' + self.root
1842
1857
1843 def hook(self, name, throw=False, **args):
1858 def hook(self, name, throw=False, **args):
1844 """Call a hook, passing this repo instance.
1859 """Call a hook, passing this repo instance.
1845
1860
1846 This a convenience method to aid invoking hooks. Extensions likely
1861 This a convenience method to aid invoking hooks. Extensions likely
1847 won't call this unless they have registered a custom hook or are
1862 won't call this unless they have registered a custom hook or are
1848 replacing code that is expected to call a hook.
1863 replacing code that is expected to call a hook.
1849 """
1864 """
1850 return hook.hook(self.ui, self, name, throw, **args)
1865 return hook.hook(self.ui, self, name, throw, **args)
1851
1866
1852 @filteredpropertycache
1867 @filteredpropertycache
1853 def _tagscache(self):
1868 def _tagscache(self):
1854 """Returns a tagscache object that contains various tags related
1869 """Returns a tagscache object that contains various tags related
1855 caches."""
1870 caches."""
1856
1871
1857 # This simplifies its cache management by having one decorated
1872 # This simplifies its cache management by having one decorated
1858 # function (this one) and the rest simply fetch things from it.
1873 # function (this one) and the rest simply fetch things from it.
1859 class tagscache(object):
1874 class tagscache(object):
1860 def __init__(self):
1875 def __init__(self):
1861 # These two define the set of tags for this repository. tags
1876 # These two define the set of tags for this repository. tags
1862 # maps tag name to node; tagtypes maps tag name to 'global' or
1877 # maps tag name to node; tagtypes maps tag name to 'global' or
1863 # 'local'. (Global tags are defined by .hgtags across all
1878 # 'local'. (Global tags are defined by .hgtags across all
1864 # heads, and local tags are defined in .hg/localtags.)
1879 # heads, and local tags are defined in .hg/localtags.)
1865 # They constitute the in-memory cache of tags.
1880 # They constitute the in-memory cache of tags.
1866 self.tags = self.tagtypes = None
1881 self.tags = self.tagtypes = None
1867
1882
1868 self.nodetagscache = self.tagslist = None
1883 self.nodetagscache = self.tagslist = None
1869
1884
1870 cache = tagscache()
1885 cache = tagscache()
1871 cache.tags, cache.tagtypes = self._findtags()
1886 cache.tags, cache.tagtypes = self._findtags()
1872
1887
1873 return cache
1888 return cache
1874
1889
1875 def tags(self):
1890 def tags(self):
1876 '''return a mapping of tag to node'''
1891 '''return a mapping of tag to node'''
1877 t = {}
1892 t = {}
1878 if self.changelog.filteredrevs:
1893 if self.changelog.filteredrevs:
1879 tags, tt = self._findtags()
1894 tags, tt = self._findtags()
1880 else:
1895 else:
1881 tags = self._tagscache.tags
1896 tags = self._tagscache.tags
1882 rev = self.changelog.rev
1897 rev = self.changelog.rev
1883 for k, v in pycompat.iteritems(tags):
1898 for k, v in pycompat.iteritems(tags):
1884 try:
1899 try:
1885 # ignore tags to unknown nodes
1900 # ignore tags to unknown nodes
1886 rev(v)
1901 rev(v)
1887 t[k] = v
1902 t[k] = v
1888 except (error.LookupError, ValueError):
1903 except (error.LookupError, ValueError):
1889 pass
1904 pass
1890 return t
1905 return t
1891
1906
1892 def _findtags(self):
1907 def _findtags(self):
1893 """Do the hard work of finding tags. Return a pair of dicts
1908 """Do the hard work of finding tags. Return a pair of dicts
1894 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1909 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1895 maps tag name to a string like \'global\' or \'local\'.
1910 maps tag name to a string like \'global\' or \'local\'.
1896 Subclasses or extensions are free to add their own tags, but
1911 Subclasses or extensions are free to add their own tags, but
1897 should be aware that the returned dicts will be retained for the
1912 should be aware that the returned dicts will be retained for the
1898 duration of the localrepo object."""
1913 duration of the localrepo object."""
1899
1914
1900 # XXX what tagtype should subclasses/extensions use? Currently
1915 # XXX what tagtype should subclasses/extensions use? Currently
1901 # mq and bookmarks add tags, but do not set the tagtype at all.
1916 # mq and bookmarks add tags, but do not set the tagtype at all.
1902 # Should each extension invent its own tag type? Should there
1917 # Should each extension invent its own tag type? Should there
1903 # be one tagtype for all such "virtual" tags? Or is the status
1918 # be one tagtype for all such "virtual" tags? Or is the status
1904 # quo fine?
1919 # quo fine?
1905
1920
1906 # map tag name to (node, hist)
1921 # map tag name to (node, hist)
1907 alltags = tagsmod.findglobaltags(self.ui, self)
1922 alltags = tagsmod.findglobaltags(self.ui, self)
1908 # map tag name to tag type
1923 # map tag name to tag type
1909 tagtypes = {tag: b'global' for tag in alltags}
1924 tagtypes = {tag: b'global' for tag in alltags}
1910
1925
1911 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1926 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1912
1927
1913 # Build the return dicts. Have to re-encode tag names because
1928 # Build the return dicts. Have to re-encode tag names because
1914 # the tags module always uses UTF-8 (in order not to lose info
1929 # the tags module always uses UTF-8 (in order not to lose info
1915 # writing to the cache), but the rest of Mercurial wants them in
1930 # writing to the cache), but the rest of Mercurial wants them in
1916 # local encoding.
1931 # local encoding.
1917 tags = {}
1932 tags = {}
1918 for (name, (node, hist)) in pycompat.iteritems(alltags):
1933 for (name, (node, hist)) in pycompat.iteritems(alltags):
1919 if node != nullid:
1934 if node != nullid:
1920 tags[encoding.tolocal(name)] = node
1935 tags[encoding.tolocal(name)] = node
1921 tags[b'tip'] = self.changelog.tip()
1936 tags[b'tip'] = self.changelog.tip()
1922 tagtypes = {
1937 tagtypes = {
1923 encoding.tolocal(name): value
1938 encoding.tolocal(name): value
1924 for (name, value) in pycompat.iteritems(tagtypes)
1939 for (name, value) in pycompat.iteritems(tagtypes)
1925 }
1940 }
1926 return (tags, tagtypes)
1941 return (tags, tagtypes)
1927
1942
1928 def tagtype(self, tagname):
1943 def tagtype(self, tagname):
1929 """
1944 """
1930 return the type of the given tag. result can be:
1945 return the type of the given tag. result can be:
1931
1946
1932 'local' : a local tag
1947 'local' : a local tag
1933 'global' : a global tag
1948 'global' : a global tag
1934 None : tag does not exist
1949 None : tag does not exist
1935 """
1950 """
1936
1951
1937 return self._tagscache.tagtypes.get(tagname)
1952 return self._tagscache.tagtypes.get(tagname)
1938
1953
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        # computed lazily; the result is memoized on the tags cache object
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            # sort by (revision, tag name, node), then drop the revision so
            # the stored list is [(tagname, node), ...]
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist
1948
1963
    def nodetags(self, node):
        '''return the tags associated with a node'''
        # build (lazily) the inverse of the tag->node mapping: node -> [tags]
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            # keep tag lists in a deterministic (sorted) order
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
1959
1974
1960 def nodebookmarks(self, node):
1975 def nodebookmarks(self, node):
1961 """return the list of bookmarks pointing to the specified node"""
1976 """return the list of bookmarks pointing to the specified node"""
1962 return self._bookmarks.names(node)
1977 return self._bookmarks.names(node)
1963
1978
1964 def branchmap(self):
1979 def branchmap(self):
1965 """returns a dictionary {branch: [branchheads]} with branchheads
1980 """returns a dictionary {branch: [branchheads]} with branchheads
1966 ordered by increasing revision number"""
1981 ordered by increasing revision number"""
1967 return self._branchcaches[self]
1982 return self._branchcaches[self]
1968
1983
    @unfilteredmethod
    def revbranchcache(self):
        # lazily build the rev -> branch cache; it is constructed against the
        # unfiltered repository and memoized on the instance
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
1974
1989
    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                # fall through: the implicit return value is None for a
                # missing branch when ignoremissing is set
                pass
1990
2005
    def lookup(self, key):
        """Resolve a revision symbol ``key`` to a changeset node.

        Raises RepoLookupError when the symbol resolves to no node.
        """
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node
1996
2011
    def lookupbranch(self, key):
        """Resolve ``key`` to a branch name.

        An existing branch name wins; otherwise ``key`` is interpreted as a
        revision symbol and that revision's branch is returned.
        """
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()
2002
2017
2003 def known(self, nodes):
2018 def known(self, nodes):
2004 cl = self.changelog
2019 cl = self.changelog
2005 get_rev = cl.index.get_rev
2020 get_rev = cl.index.get_rev
2006 filtered = cl.filteredrevs
2021 filtered = cl.filteredrevs
2007 result = []
2022 result = []
2008 for n in nodes:
2023 for n in nodes:
2009 r = get_rev(n)
2024 r = get_rev(n)
2010 resp = not (r is None or r in filtered)
2025 resp = not (r is None or r in filtered)
2011 result.append(resp)
2026 result.append(resp)
2012 return result
2027 return result
2013
2028
2014 def local(self):
2029 def local(self):
2015 return self
2030 return self
2016
2031
2017 def publishing(self):
2032 def publishing(self):
2018 # it's safe (and desirable) to trust the publish flag unconditionally
2033 # it's safe (and desirable) to trust the publish flag unconditionally
2019 # so that we don't finalize changes shared between users via ssh or nfs
2034 # so that we don't finalize changes shared between users via ssh or nfs
2020 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2035 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2021
2036
    def cancopy(self):
        """Whether the repository store can be copied as-is (hardlink clone)."""
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs
2030
2045
2031 def shared(self):
2046 def shared(self):
2032 '''the type of shared repository (None if not shared)'''
2047 '''the type of shared repository (None if not shared)'''
2033 if self.sharedpath != self.path:
2048 if self.sharedpath != self.path:
2034 return b'store'
2049 return b'store'
2035 return None
2050 return None
2036
2051
2037 def wjoin(self, f, *insidef):
2052 def wjoin(self, f, *insidef):
2038 return self.vfs.reljoin(self.root, f, *insidef)
2053 return self.vfs.reljoin(self.root, f, *insidef)
2039
2054
    def setparents(self, p1, p2=nullid):
        """Set the working directory parent nodes.

        Also invalidates the quick-access changeid cache, which may hold
        the previous parents.
        """
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()
2043
2058
    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        # thin wrapper: all resolution logic lives in context.filectx
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )
2050
2065
2051 def getcwd(self):
2066 def getcwd(self):
2052 return self.dirstate.getcwd()
2067 return self.dirstate.getcwd()
2053
2068
2054 def pathto(self, f, cwd=None):
2069 def pathto(self, f, cwd=None):
2055 return self.dirstate.pathto(f, cwd)
2070 return self.dirstate.pathto(f, cwd)
2056
2071
    def _loadfilter(self, filter):
        """Load and cache the filter chain configured in config section
        ``filter`` (e.g. ``encode`` or ``decode``).

        Returns a list of ``(matcher, filterfn, params)`` tuples; the result
        is memoized in ``self._filterpats``.
        """
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                # a command of '!' disables filtering for this pattern
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                # a command starting with a registered data-filter name uses
                # that in-process filter; the rest of the string is its params
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    # otherwise fall back to piping through an external command
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
2082
2097
    def _filter(self, filterpats, filename, data):
        """Run ``data`` through the first filter in ``filterpats`` whose
        pattern matches ``filename`` and return the (possibly transformed)
        data. At most one filter is applied."""
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
2094
2109
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # cached filters from the [encode] config section (used by wread)
        return self._loadfilter(b'encode')
2098
2113
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # cached filters from the [decode] config section (used by wwrite)
        return self._loadfilter(b'decode')
2102
2117
2103 def adddatafilter(self, name, filter):
2118 def adddatafilter(self, name, filter):
2104 self._datafilters[name] = filter
2119 self._datafilters[name] = filter
2105
2120
    def wread(self, filename):
        """Read ``filename`` from the working directory and apply the
        [encode] filters.

        A symlink is read as its target path rather than as file content.
        """
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)
2112
2127
    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns length of written (maybe decoded) data.
        """
        # apply the [decode] filters before the data hits the working dir
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            # 'l' flag: materialize as a symlink whose target is ``data``
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                # 'x' flag: mark the file executable
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
2130
2145
2131 def wwritedata(self, filename, data):
2146 def wwritedata(self, filename, data):
2132 return self._filter(self._decodefilterpats, filename, data)
2147 return self._filter(self._decodefilterpats, filename, data)
2133
2148
2134 def currenttransaction(self):
2149 def currenttransaction(self):
2135 """return the current transaction or None if non exists"""
2150 """return the current transaction or None if non exists"""
2136 if self._transref:
2151 if self._transref:
2137 tr = self._transref()
2152 tr = self._transref()
2138 else:
2153 else:
2139 tr = None
2154 tr = None
2140
2155
2141 if tr and tr.running():
2156 if tr and tr.running():
2142 return tr
2157 return tr
2143 return None
2158 return None
2144
2159
    def transaction(self, desc, report=None):
        """Open a transaction named ``desc`` and return it.

        If a transaction is already running, a nested transaction is
        returned instead. ``report`` overrides ``ui.warn`` as the error
        reporting callback. Raises RepoError if an abandoned journal is
        found, and (under devel checks) ProgrammingError if no lock is held.
        """
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        # generate a (practically) unique transaction id for hooks
        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movement from a code perspective. So we fallback to a
        # tracking at the repository level. One could envision to track changes
        # to the '.hgtags' file through changegroup apply but that fails to
        # cope with case where transaction expose new heads without changegroup
        # being involved (eg: phase movement).
        #
        # For now, We gate the feature behind a flag since this likely comes
        # with performance impacts. The current code run more often than needed
        # and do not use caches as much as it could. The current focus is on
        # the behavior of the feature so we disable it by default. The flag
        # will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tag were touched
        # in this one. Changes are recorded in a line base format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follow:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once buiding set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we needs it to still exist on the transaction
                        # is close (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hooks since the 'tr.hookargs'
            # dict is copied before these run. In addition we needs the data
            # available to in memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

            repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
2411
2426
2412 def _journalfiles(self):
2427 def _journalfiles(self):
2413 return (
2428 return (
2414 (self.svfs, b'journal'),
2429 (self.svfs, b'journal'),
2415 (self.svfs, b'journal.narrowspec'),
2430 (self.svfs, b'journal.narrowspec'),
2416 (self.vfs, b'journal.narrowspec.dirstate'),
2431 (self.vfs, b'journal.narrowspec.dirstate'),
2417 (self.vfs, b'journal.dirstate'),
2432 (self.vfs, b'journal.dirstate'),
2418 (self.vfs, b'journal.branch'),
2433 (self.vfs, b'journal.branch'),
2419 (self.vfs, b'journal.desc'),
2434 (self.vfs, b'journal.desc'),
2420 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2435 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2421 (self.svfs, b'journal.phaseroots'),
2436 (self.svfs, b'journal.phaseroots'),
2422 )
2437 )
2423
2438
2424 def undofiles(self):
2439 def undofiles(self):
2425 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2440 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2426
2441
2427 @unfilteredmethod
2442 @unfilteredmethod
2428 def _writejournal(self, desc):
2443 def _writejournal(self, desc):
2429 self.dirstate.savebackup(None, b'journal.dirstate')
2444 self.dirstate.savebackup(None, b'journal.dirstate')
2430 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2445 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2431 narrowspec.savebackup(self, b'journal.narrowspec')
2446 narrowspec.savebackup(self, b'journal.narrowspec')
2432 self.vfs.write(
2447 self.vfs.write(
2433 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2448 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2434 )
2449 )
2435 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2450 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2436 bookmarksvfs = bookmarks.bookmarksvfs(self)
2451 bookmarksvfs = bookmarks.bookmarksvfs(self)
2437 bookmarksvfs.write(
2452 bookmarksvfs.write(
2438 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2453 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2439 )
2454 )
2440 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2455 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2441
2456
2442 def recover(self):
2457 def recover(self):
2443 with self.lock():
2458 with self.lock():
2444 if self.svfs.exists(b"journal"):
2459 if self.svfs.exists(b"journal"):
2445 self.ui.status(_(b"rolling back interrupted transaction\n"))
2460 self.ui.status(_(b"rolling back interrupted transaction\n"))
2446 vfsmap = {
2461 vfsmap = {
2447 b'': self.svfs,
2462 b'': self.svfs,
2448 b'plain': self.vfs,
2463 b'plain': self.vfs,
2449 }
2464 }
2450 transaction.rollback(
2465 transaction.rollback(
2451 self.svfs,
2466 self.svfs,
2452 vfsmap,
2467 vfsmap,
2453 b"journal",
2468 b"journal",
2454 self.ui.warn,
2469 self.ui.warn,
2455 checkambigfiles=_cachedfiles,
2470 checkambigfiles=_cachedfiles,
2456 )
2471 )
2457 self.invalidate()
2472 self.invalidate()
2458 return True
2473 return True
2459 else:
2474 else:
2460 self.ui.warn(_(b"no interrupted transaction available\n"))
2475 self.ui.warn(_(b"no interrupted transaction available\n"))
2461 return False
2476 return False
2462
2477
2463 def rollback(self, dryrun=False, force=False):
2478 def rollback(self, dryrun=False, force=False):
2464 wlock = lock = dsguard = None
2479 wlock = lock = dsguard = None
2465 try:
2480 try:
2466 wlock = self.wlock()
2481 wlock = self.wlock()
2467 lock = self.lock()
2482 lock = self.lock()
2468 if self.svfs.exists(b"undo"):
2483 if self.svfs.exists(b"undo"):
2469 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2484 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2470
2485
2471 return self._rollback(dryrun, force, dsguard)
2486 return self._rollback(dryrun, force, dsguard)
2472 else:
2487 else:
2473 self.ui.warn(_(b"no rollback information available\n"))
2488 self.ui.warn(_(b"no rollback information available\n"))
2474 return 1
2489 return 1
2475 finally:
2490 finally:
2476 release(dsguard, lock, wlock)
2491 release(dsguard, lock, wlock)
2477
2492
2478 @unfilteredmethod # Until we get smarter cache management
2493 @unfilteredmethod # Until we get smarter cache management
2479 def _rollback(self, dryrun, force, dsguard):
2494 def _rollback(self, dryrun, force, dsguard):
2480 ui = self.ui
2495 ui = self.ui
2481 try:
2496 try:
2482 args = self.vfs.read(b'undo.desc').splitlines()
2497 args = self.vfs.read(b'undo.desc').splitlines()
2483 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2498 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2484 if len(args) >= 3:
2499 if len(args) >= 3:
2485 detail = args[2]
2500 detail = args[2]
2486 oldtip = oldlen - 1
2501 oldtip = oldlen - 1
2487
2502
2488 if detail and ui.verbose:
2503 if detail and ui.verbose:
2489 msg = _(
2504 msg = _(
2490 b'repository tip rolled back to revision %d'
2505 b'repository tip rolled back to revision %d'
2491 b' (undo %s: %s)\n'
2506 b' (undo %s: %s)\n'
2492 ) % (oldtip, desc, detail)
2507 ) % (oldtip, desc, detail)
2493 else:
2508 else:
2494 msg = _(
2509 msg = _(
2495 b'repository tip rolled back to revision %d (undo %s)\n'
2510 b'repository tip rolled back to revision %d (undo %s)\n'
2496 ) % (oldtip, desc)
2511 ) % (oldtip, desc)
2497 except IOError:
2512 except IOError:
2498 msg = _(b'rolling back unknown transaction\n')
2513 msg = _(b'rolling back unknown transaction\n')
2499 desc = None
2514 desc = None
2500
2515
2501 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2516 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2502 raise error.Abort(
2517 raise error.Abort(
2503 _(
2518 _(
2504 b'rollback of last commit while not checked out '
2519 b'rollback of last commit while not checked out '
2505 b'may lose data'
2520 b'may lose data'
2506 ),
2521 ),
2507 hint=_(b'use -f to force'),
2522 hint=_(b'use -f to force'),
2508 )
2523 )
2509
2524
2510 ui.status(msg)
2525 ui.status(msg)
2511 if dryrun:
2526 if dryrun:
2512 return 0
2527 return 0
2513
2528
2514 parents = self.dirstate.parents()
2529 parents = self.dirstate.parents()
2515 self.destroying()
2530 self.destroying()
2516 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2531 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2517 transaction.rollback(
2532 transaction.rollback(
2518 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2533 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2519 )
2534 )
2520 bookmarksvfs = bookmarks.bookmarksvfs(self)
2535 bookmarksvfs = bookmarks.bookmarksvfs(self)
2521 if bookmarksvfs.exists(b'undo.bookmarks'):
2536 if bookmarksvfs.exists(b'undo.bookmarks'):
2522 bookmarksvfs.rename(
2537 bookmarksvfs.rename(
2523 b'undo.bookmarks', b'bookmarks', checkambig=True
2538 b'undo.bookmarks', b'bookmarks', checkambig=True
2524 )
2539 )
2525 if self.svfs.exists(b'undo.phaseroots'):
2540 if self.svfs.exists(b'undo.phaseroots'):
2526 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2541 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2527 self.invalidate()
2542 self.invalidate()
2528
2543
2529 has_node = self.changelog.index.has_node
2544 has_node = self.changelog.index.has_node
2530 parentgone = any(not has_node(p) for p in parents)
2545 parentgone = any(not has_node(p) for p in parents)
2531 if parentgone:
2546 if parentgone:
2532 # prevent dirstateguard from overwriting already restored one
2547 # prevent dirstateguard from overwriting already restored one
2533 dsguard.close()
2548 dsguard.close()
2534
2549
2535 narrowspec.restorebackup(self, b'undo.narrowspec')
2550 narrowspec.restorebackup(self, b'undo.narrowspec')
2536 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2551 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2537 self.dirstate.restorebackup(None, b'undo.dirstate')
2552 self.dirstate.restorebackup(None, b'undo.dirstate')
2538 try:
2553 try:
2539 branch = self.vfs.read(b'undo.branch')
2554 branch = self.vfs.read(b'undo.branch')
2540 self.dirstate.setbranch(encoding.tolocal(branch))
2555 self.dirstate.setbranch(encoding.tolocal(branch))
2541 except IOError:
2556 except IOError:
2542 ui.warn(
2557 ui.warn(
2543 _(
2558 _(
2544 b'named branch could not be reset: '
2559 b'named branch could not be reset: '
2545 b'current branch is still \'%s\'\n'
2560 b'current branch is still \'%s\'\n'
2546 )
2561 )
2547 % self.dirstate.branch()
2562 % self.dirstate.branch()
2548 )
2563 )
2549
2564
2550 parents = tuple([p.rev() for p in self[None].parents()])
2565 parents = tuple([p.rev() for p in self[None].parents()])
2551 if len(parents) > 1:
2566 if len(parents) > 1:
2552 ui.status(
2567 ui.status(
2553 _(
2568 _(
2554 b'working directory now based on '
2569 b'working directory now based on '
2555 b'revisions %d and %d\n'
2570 b'revisions %d and %d\n'
2556 )
2571 )
2557 % parents
2572 % parents
2558 )
2573 )
2559 else:
2574 else:
2560 ui.status(
2575 ui.status(
2561 _(b'working directory now based on revision %d\n') % parents
2576 _(b'working directory now based on revision %d\n') % parents
2562 )
2577 )
2563 mergestatemod.mergestate.clean(self)
2578 mergestatemod.mergestate.clean(self)
2564
2579
2565 # TODO: if we know which new heads may result from this rollback, pass
2580 # TODO: if we know which new heads may result from this rollback, pass
2566 # them to destroy(), which will prevent the branchhead cache from being
2581 # them to destroy(), which will prevent the branchhead cache from being
2567 # invalidated.
2582 # invalidated.
2568 self.destroyed()
2583 self.destroyed()
2569 return 0
2584 return 0
2570
2585
2571 def _buildcacheupdater(self, newtransaction):
2586 def _buildcacheupdater(self, newtransaction):
2572 """called during transaction to build the callback updating cache
2587 """called during transaction to build the callback updating cache
2573
2588
2574 Lives on the repository to help extension who might want to augment
2589 Lives on the repository to help extension who might want to augment
2575 this logic. For this purpose, the created transaction is passed to the
2590 this logic. For this purpose, the created transaction is passed to the
2576 method.
2591 method.
2577 """
2592 """
2578 # we must avoid cyclic reference between repo and transaction.
2593 # we must avoid cyclic reference between repo and transaction.
2579 reporef = weakref.ref(self)
2594 reporef = weakref.ref(self)
2580
2595
2581 def updater(tr):
2596 def updater(tr):
2582 repo = reporef()
2597 repo = reporef()
2583 repo.updatecaches(tr)
2598 repo.updatecaches(tr)
2584
2599
2585 return updater
2600 return updater
2586
2601
2587 @unfilteredmethod
2602 @unfilteredmethod
2588 def updatecaches(self, tr=None, full=False):
2603 def updatecaches(self, tr=None, full=False):
2589 """warm appropriate caches
2604 """warm appropriate caches
2590
2605
2591 If this function is called after a transaction closed. The transaction
2606 If this function is called after a transaction closed. The transaction
2592 will be available in the 'tr' argument. This can be used to selectively
2607 will be available in the 'tr' argument. This can be used to selectively
2593 update caches relevant to the changes in that transaction.
2608 update caches relevant to the changes in that transaction.
2594
2609
2595 If 'full' is set, make sure all caches the function knows about have
2610 If 'full' is set, make sure all caches the function knows about have
2596 up-to-date data. Even the ones usually loaded more lazily.
2611 up-to-date data. Even the ones usually loaded more lazily.
2597 """
2612 """
2598 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2613 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2599 # During strip, many caches are invalid but
2614 # During strip, many caches are invalid but
2600 # later call to `destroyed` will refresh them.
2615 # later call to `destroyed` will refresh them.
2601 return
2616 return
2602
2617
2603 if tr is None or tr.changes[b'origrepolen'] < len(self):
2618 if tr is None or tr.changes[b'origrepolen'] < len(self):
2604 # accessing the 'ser ved' branchmap should refresh all the others,
2619 # accessing the 'ser ved' branchmap should refresh all the others,
2605 self.ui.debug(b'updating the branch cache\n')
2620 self.ui.debug(b'updating the branch cache\n')
2606 self.filtered(b'served').branchmap()
2621 self.filtered(b'served').branchmap()
2607 self.filtered(b'served.hidden').branchmap()
2622 self.filtered(b'served.hidden').branchmap()
2608
2623
2609 if full:
2624 if full:
2610 unfi = self.unfiltered()
2625 unfi = self.unfiltered()
2611
2626
2612 self.changelog.update_caches(transaction=tr)
2627 self.changelog.update_caches(transaction=tr)
2613 self.manifestlog.update_caches(transaction=tr)
2628 self.manifestlog.update_caches(transaction=tr)
2614
2629
2615 rbc = unfi.revbranchcache()
2630 rbc = unfi.revbranchcache()
2616 for r in unfi.changelog:
2631 for r in unfi.changelog:
2617 rbc.branchinfo(r)
2632 rbc.branchinfo(r)
2618 rbc.write()
2633 rbc.write()
2619
2634
2620 # ensure the working copy parents are in the manifestfulltextcache
2635 # ensure the working copy parents are in the manifestfulltextcache
2621 for ctx in self[b'.'].parents():
2636 for ctx in self[b'.'].parents():
2622 ctx.manifest() # accessing the manifest is enough
2637 ctx.manifest() # accessing the manifest is enough
2623
2638
2624 # accessing fnode cache warms the cache
2639 # accessing fnode cache warms the cache
2625 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2640 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2626 # accessing tags warm the cache
2641 # accessing tags warm the cache
2627 self.tags()
2642 self.tags()
2628 self.filtered(b'served').tags()
2643 self.filtered(b'served').tags()
2629
2644
2630 # The `full` arg is documented as updating even the lazily-loaded
2645 # The `full` arg is documented as updating even the lazily-loaded
2631 # caches immediately, so we're forcing a write to cause these caches
2646 # caches immediately, so we're forcing a write to cause these caches
2632 # to be warmed up even if they haven't explicitly been requested
2647 # to be warmed up even if they haven't explicitly been requested
2633 # yet (if they've never been used by hg, they won't ever have been
2648 # yet (if they've never been used by hg, they won't ever have been
2634 # written, even if they're a subset of another kind of cache that
2649 # written, even if they're a subset of another kind of cache that
2635 # *has* been used).
2650 # *has* been used).
2636 for filt in repoview.filtertable.keys():
2651 for filt in repoview.filtertable.keys():
2637 filtered = self.filtered(filt)
2652 filtered = self.filtered(filt)
2638 filtered.branchmap().write(filtered)
2653 filtered.branchmap().write(filtered)
2639
2654
2640 def invalidatecaches(self):
2655 def invalidatecaches(self):
2641
2656
2642 if '_tagscache' in vars(self):
2657 if '_tagscache' in vars(self):
2643 # can't use delattr on proxy
2658 # can't use delattr on proxy
2644 del self.__dict__['_tagscache']
2659 del self.__dict__['_tagscache']
2645
2660
2646 self._branchcaches.clear()
2661 self._branchcaches.clear()
2647 self.invalidatevolatilesets()
2662 self.invalidatevolatilesets()
2648 self._sparsesignaturecache.clear()
2663 self._sparsesignaturecache.clear()
2649
2664
2650 def invalidatevolatilesets(self):
2665 def invalidatevolatilesets(self):
2651 self.filteredrevcache.clear()
2666 self.filteredrevcache.clear()
2652 obsolete.clearobscaches(self)
2667 obsolete.clearobscaches(self)
2653 self._quick_access_changeid_invalidate()
2668 self._quick_access_changeid_invalidate()
2654
2669
2655 def invalidatedirstate(self):
2670 def invalidatedirstate(self):
2656 """Invalidates the dirstate, causing the next call to dirstate
2671 """Invalidates the dirstate, causing the next call to dirstate
2657 to check if it was modified since the last time it was read,
2672 to check if it was modified since the last time it was read,
2658 rereading it if it has.
2673 rereading it if it has.
2659
2674
2660 This is different to dirstate.invalidate() that it doesn't always
2675 This is different to dirstate.invalidate() that it doesn't always
2661 rereads the dirstate. Use dirstate.invalidate() if you want to
2676 rereads the dirstate. Use dirstate.invalidate() if you want to
2662 explicitly read the dirstate again (i.e. restoring it to a previous
2677 explicitly read the dirstate again (i.e. restoring it to a previous
2663 known good state)."""
2678 known good state)."""
2664 if hasunfilteredcache(self, 'dirstate'):
2679 if hasunfilteredcache(self, 'dirstate'):
2665 for k in self.dirstate._filecache:
2680 for k in self.dirstate._filecache:
2666 try:
2681 try:
2667 delattr(self.dirstate, k)
2682 delattr(self.dirstate, k)
2668 except AttributeError:
2683 except AttributeError:
2669 pass
2684 pass
2670 delattr(self.unfiltered(), 'dirstate')
2685 delattr(self.unfiltered(), 'dirstate')
2671
2686
2672 def invalidate(self, clearfilecache=False):
2687 def invalidate(self, clearfilecache=False):
2673 """Invalidates both store and non-store parts other than dirstate
2688 """Invalidates both store and non-store parts other than dirstate
2674
2689
2675 If a transaction is running, invalidation of store is omitted,
2690 If a transaction is running, invalidation of store is omitted,
2676 because discarding in-memory changes might cause inconsistency
2691 because discarding in-memory changes might cause inconsistency
2677 (e.g. incomplete fncache causes unintentional failure, but
2692 (e.g. incomplete fncache causes unintentional failure, but
2678 redundant one doesn't).
2693 redundant one doesn't).
2679 """
2694 """
2680 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2695 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2681 for k in list(self._filecache.keys()):
2696 for k in list(self._filecache.keys()):
2682 # dirstate is invalidated separately in invalidatedirstate()
2697 # dirstate is invalidated separately in invalidatedirstate()
2683 if k == b'dirstate':
2698 if k == b'dirstate':
2684 continue
2699 continue
2685 if (
2700 if (
2686 k == b'changelog'
2701 k == b'changelog'
2687 and self.currenttransaction()
2702 and self.currenttransaction()
2688 and self.changelog._delayed
2703 and self.changelog._delayed
2689 ):
2704 ):
2690 # The changelog object may store unwritten revisions. We don't
2705 # The changelog object may store unwritten revisions. We don't
2691 # want to lose them.
2706 # want to lose them.
2692 # TODO: Solve the problem instead of working around it.
2707 # TODO: Solve the problem instead of working around it.
2693 continue
2708 continue
2694
2709
2695 if clearfilecache:
2710 if clearfilecache:
2696 del self._filecache[k]
2711 del self._filecache[k]
2697 try:
2712 try:
2698 delattr(unfiltered, k)
2713 delattr(unfiltered, k)
2699 except AttributeError:
2714 except AttributeError:
2700 pass
2715 pass
2701 self.invalidatecaches()
2716 self.invalidatecaches()
2702 if not self.currenttransaction():
2717 if not self.currenttransaction():
2703 # TODO: Changing contents of store outside transaction
2718 # TODO: Changing contents of store outside transaction
2704 # causes inconsistency. We should make in-memory store
2719 # causes inconsistency. We should make in-memory store
2705 # changes detectable, and abort if changed.
2720 # changes detectable, and abort if changed.
2706 self.store.invalidatecaches()
2721 self.store.invalidatecaches()
2707
2722
2708 def invalidateall(self):
2723 def invalidateall(self):
2709 """Fully invalidates both store and non-store parts, causing the
2724 """Fully invalidates both store and non-store parts, causing the
2710 subsequent operation to reread any outside changes."""
2725 subsequent operation to reread any outside changes."""
2711 # extension should hook this to invalidate its caches
2726 # extension should hook this to invalidate its caches
2712 self.invalidate()
2727 self.invalidate()
2713 self.invalidatedirstate()
2728 self.invalidatedirstate()
2714
2729
2715 @unfilteredmethod
2730 @unfilteredmethod
2716 def _refreshfilecachestats(self, tr):
2731 def _refreshfilecachestats(self, tr):
2717 """Reload stats of cached files so that they are flagged as valid"""
2732 """Reload stats of cached files so that they are flagged as valid"""
2718 for k, ce in self._filecache.items():
2733 for k, ce in self._filecache.items():
2719 k = pycompat.sysstr(k)
2734 k = pycompat.sysstr(k)
2720 if k == 'dirstate' or k not in self.__dict__:
2735 if k == 'dirstate' or k not in self.__dict__:
2721 continue
2736 continue
2722 ce.refresh()
2737 ce.refresh()
2723
2738
2724 def _lock(
2739 def _lock(
2725 self,
2740 self,
2726 vfs,
2741 vfs,
2727 lockname,
2742 lockname,
2728 wait,
2743 wait,
2729 releasefn,
2744 releasefn,
2730 acquirefn,
2745 acquirefn,
2731 desc,
2746 desc,
2732 ):
2747 ):
2733 timeout = 0
2748 timeout = 0
2734 warntimeout = 0
2749 warntimeout = 0
2735 if wait:
2750 if wait:
2736 timeout = self.ui.configint(b"ui", b"timeout")
2751 timeout = self.ui.configint(b"ui", b"timeout")
2737 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2752 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2738 # internal config: ui.signal-safe-lock
2753 # internal config: ui.signal-safe-lock
2739 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2754 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2740
2755
2741 l = lockmod.trylock(
2756 l = lockmod.trylock(
2742 self.ui,
2757 self.ui,
2743 vfs,
2758 vfs,
2744 lockname,
2759 lockname,
2745 timeout,
2760 timeout,
2746 warntimeout,
2761 warntimeout,
2747 releasefn=releasefn,
2762 releasefn=releasefn,
2748 acquirefn=acquirefn,
2763 acquirefn=acquirefn,
2749 desc=desc,
2764 desc=desc,
2750 signalsafe=signalsafe,
2765 signalsafe=signalsafe,
2751 )
2766 )
2752 return l
2767 return l
2753
2768
2754 def _afterlock(self, callback):
2769 def _afterlock(self, callback):
2755 """add a callback to be run when the repository is fully unlocked
2770 """add a callback to be run when the repository is fully unlocked
2756
2771
2757 The callback will be executed when the outermost lock is released
2772 The callback will be executed when the outermost lock is released
2758 (with wlock being higher level than 'lock')."""
2773 (with wlock being higher level than 'lock')."""
2759 for ref in (self._wlockref, self._lockref):
2774 for ref in (self._wlockref, self._lockref):
2760 l = ref and ref()
2775 l = ref and ref()
2761 if l and l.held:
2776 if l and l.held:
2762 l.postrelease.append(callback)
2777 l.postrelease.append(callback)
2763 break
2778 break
2764 else: # no lock have been found.
2779 else: # no lock have been found.
2765 callback(True)
2780 callback(True)
2766
2781
2767 def lock(self, wait=True):
2782 def lock(self, wait=True):
2768 """Lock the repository store (.hg/store) and return a weak reference
2783 """Lock the repository store (.hg/store) and return a weak reference
2769 to the lock. Use this before modifying the store (e.g. committing or
2784 to the lock. Use this before modifying the store (e.g. committing or
2770 stripping). If you are opening a transaction, get a lock as well.)
2785 stripping). If you are opening a transaction, get a lock as well.)
2771
2786
2772 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2787 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2773 'wlock' first to avoid a dead-lock hazard."""
2788 'wlock' first to avoid a dead-lock hazard."""
2774 l = self._currentlock(self._lockref)
2789 l = self._currentlock(self._lockref)
2775 if l is not None:
2790 if l is not None:
2776 l.lock()
2791 l.lock()
2777 return l
2792 return l
2778
2793
2779 l = self._lock(
2794 l = self._lock(
2780 vfs=self.svfs,
2795 vfs=self.svfs,
2781 lockname=b"lock",
2796 lockname=b"lock",
2782 wait=wait,
2797 wait=wait,
2783 releasefn=None,
2798 releasefn=None,
2784 acquirefn=self.invalidate,
2799 acquirefn=self.invalidate,
2785 desc=_(b'repository %s') % self.origroot,
2800 desc=_(b'repository %s') % self.origroot,
2786 )
2801 )
2787 self._lockref = weakref.ref(l)
2802 self._lockref = weakref.ref(l)
2788 return l
2803 return l
2789
2804
2790 def wlock(self, wait=True):
2805 def wlock(self, wait=True):
2791 """Lock the non-store parts of the repository (everything under
2806 """Lock the non-store parts of the repository (everything under
2792 .hg except .hg/store) and return a weak reference to the lock.
2807 .hg except .hg/store) and return a weak reference to the lock.
2793
2808
2794 Use this before modifying files in .hg.
2809 Use this before modifying files in .hg.
2795
2810
2796 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2811 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2797 'wlock' first to avoid a dead-lock hazard."""
2812 'wlock' first to avoid a dead-lock hazard."""
2798 l = self._wlockref and self._wlockref()
2813 l = self._wlockref and self._wlockref()
2799 if l is not None and l.held:
2814 if l is not None and l.held:
2800 l.lock()
2815 l.lock()
2801 return l
2816 return l
2802
2817
2803 # We do not need to check for non-waiting lock acquisition. Such
2818 # We do not need to check for non-waiting lock acquisition. Such
2804 # acquisition would not cause dead-lock as they would just fail.
2819 # acquisition would not cause dead-lock as they would just fail.
2805 if wait and (
2820 if wait and (
2806 self.ui.configbool(b'devel', b'all-warnings')
2821 self.ui.configbool(b'devel', b'all-warnings')
2807 or self.ui.configbool(b'devel', b'check-locks')
2822 or self.ui.configbool(b'devel', b'check-locks')
2808 ):
2823 ):
2809 if self._currentlock(self._lockref) is not None:
2824 if self._currentlock(self._lockref) is not None:
2810 self.ui.develwarn(b'"wlock" acquired after "lock"')
2825 self.ui.develwarn(b'"wlock" acquired after "lock"')
2811
2826
2812 def unlock():
2827 def unlock():
2813 if self.dirstate.pendingparentchange():
2828 if self.dirstate.pendingparentchange():
2814 self.dirstate.invalidate()
2829 self.dirstate.invalidate()
2815 else:
2830 else:
2816 self.dirstate.write(None)
2831 self.dirstate.write(None)
2817
2832
2818 self._filecache[b'dirstate'].refresh()
2833 self._filecache[b'dirstate'].refresh()
2819
2834
2820 l = self._lock(
2835 l = self._lock(
2821 self.vfs,
2836 self.vfs,
2822 b"wlock",
2837 b"wlock",
2823 wait,
2838 wait,
2824 unlock,
2839 unlock,
2825 self.invalidatedirstate,
2840 self.invalidatedirstate,
2826 _(b'working directory of %s') % self.origroot,
2841 _(b'working directory of %s') % self.origroot,
2827 )
2842 )
2828 self._wlockref = weakref.ref(l)
2843 self._wlockref = weakref.ref(l)
2829 return l
2844 return l
2830
2845
2831 def _currentlock(self, lockref):
2846 def _currentlock(self, lockref):
2832 """Returns the lock if it's held, or None if it's not."""
2847 """Returns the lock if it's held, or None if it's not."""
2833 if lockref is None:
2848 if lockref is None:
2834 return None
2849 return None
2835 l = lockref()
2850 l = lockref()
2836 if l is None or not l.held:
2851 if l is None or not l.held:
2837 return None
2852 return None
2838 return l
2853 return l
2839
2854
2840 def currentwlock(self):
2855 def currentwlock(self):
2841 """Returns the wlock if it's held, or None if it's not."""
2856 """Returns the wlock if it's held, or None if it's not."""
2842 return self._currentlock(self._wlockref)
2857 return self._currentlock(self._wlockref)
2843
2858
2844 def checkcommitpatterns(self, wctx, match, status, fail):
2859 def checkcommitpatterns(self, wctx, match, status, fail):
2845 """check for commit arguments that aren't committable"""
2860 """check for commit arguments that aren't committable"""
2846 if match.isexact() or match.prefix():
2861 if match.isexact() or match.prefix():
2847 matched = set(status.modified + status.added + status.removed)
2862 matched = set(status.modified + status.added + status.removed)
2848
2863
2849 for f in match.files():
2864 for f in match.files():
2850 f = self.dirstate.normalize(f)
2865 f = self.dirstate.normalize(f)
2851 if f == b'.' or f in matched or f in wctx.substate:
2866 if f == b'.' or f in matched or f in wctx.substate:
2852 continue
2867 continue
2853 if f in status.deleted:
2868 if f in status.deleted:
2854 fail(f, _(b'file not found!'))
2869 fail(f, _(b'file not found!'))
2855 # Is it a directory that exists or used to exist?
2870 # Is it a directory that exists or used to exist?
2856 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2871 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2857 d = f + b'/'
2872 d = f + b'/'
2858 for mf in matched:
2873 for mf in matched:
2859 if mf.startswith(d):
2874 if mf.startswith(d):
2860 break
2875 break
2861 else:
2876 else:
2862 fail(f, _(b"no match under directory!"))
2877 fail(f, _(b"no match under directory!"))
2863 elif f not in self.dirstate:
2878 elif f not in self.dirstate:
2864 fail(f, _(b"file not tracked!"))
2879 fail(f, _(b"file not tracked!"))
2865
2880
2866 @unfilteredmethod
2881 @unfilteredmethod
2867 def commit(
2882 def commit(
2868 self,
2883 self,
2869 text=b"",
2884 text=b"",
2870 user=None,
2885 user=None,
2871 date=None,
2886 date=None,
2872 match=None,
2887 match=None,
2873 force=False,
2888 force=False,
2874 editor=None,
2889 editor=None,
2875 extra=None,
2890 extra=None,
2876 ):
2891 ):
2877 """Add a new revision to current repository.
2892 """Add a new revision to current repository.
2878
2893
2879 Revision information is gathered from the working directory,
2894 Revision information is gathered from the working directory,
2880 match can be used to filter the committed files. If editor is
2895 match can be used to filter the committed files. If editor is
2881 supplied, it is called to get a commit message.
2896 supplied, it is called to get a commit message.
2882 """
2897 """
2883 if extra is None:
2898 if extra is None:
2884 extra = {}
2899 extra = {}
2885
2900
2886 def fail(f, msg):
2901 def fail(f, msg):
2887 raise error.InputError(b'%s: %s' % (f, msg))
2902 raise error.InputError(b'%s: %s' % (f, msg))
2888
2903
2889 if not match:
2904 if not match:
2890 match = matchmod.always()
2905 match = matchmod.always()
2891
2906
2892 if not force:
2907 if not force:
2893 match.bad = fail
2908 match.bad = fail
2894
2909
2895 # lock() for recent changelog (see issue4368)
2910 # lock() for recent changelog (see issue4368)
2896 with self.wlock(), self.lock():
2911 with self.wlock(), self.lock():
2897 wctx = self[None]
2912 wctx = self[None]
2898 merge = len(wctx.parents()) > 1
2913 merge = len(wctx.parents()) > 1
2899
2914
2900 if not force and merge and not match.always():
2915 if not force and merge and not match.always():
2901 raise error.Abort(
2916 raise error.Abort(
2902 _(
2917 _(
2903 b'cannot partially commit a merge '
2918 b'cannot partially commit a merge '
2904 b'(do not specify files or patterns)'
2919 b'(do not specify files or patterns)'
2905 )
2920 )
2906 )
2921 )
2907
2922
2908 status = self.status(match=match, clean=force)
2923 status = self.status(match=match, clean=force)
2909 if force:
2924 if force:
2910 status.modified.extend(
2925 status.modified.extend(
2911 status.clean
2926 status.clean
2912 ) # mq may commit clean files
2927 ) # mq may commit clean files
2913
2928
2914 # check subrepos
2929 # check subrepos
2915 subs, commitsubs, newstate = subrepoutil.precommit(
2930 subs, commitsubs, newstate = subrepoutil.precommit(
2916 self.ui, wctx, status, match, force=force
2931 self.ui, wctx, status, match, force=force
2917 )
2932 )
2918
2933
2919 # make sure all explicit patterns are matched
2934 # make sure all explicit patterns are matched
2920 if not force:
2935 if not force:
2921 self.checkcommitpatterns(wctx, match, status, fail)
2936 self.checkcommitpatterns(wctx, match, status, fail)
2922
2937
2923 cctx = context.workingcommitctx(
2938 cctx = context.workingcommitctx(
2924 self, status, text, user, date, extra
2939 self, status, text, user, date, extra
2925 )
2940 )
2926
2941
2927 ms = mergestatemod.mergestate.read(self)
2942 ms = mergestatemod.mergestate.read(self)
2928 mergeutil.checkunresolved(ms)
2943 mergeutil.checkunresolved(ms)
2929
2944
2930 # internal config: ui.allowemptycommit
2945 # internal config: ui.allowemptycommit
2931 if cctx.isempty() and not self.ui.configbool(
2946 if cctx.isempty() and not self.ui.configbool(
2932 b'ui', b'allowemptycommit'
2947 b'ui', b'allowemptycommit'
2933 ):
2948 ):
2934 self.ui.debug(b'nothing to commit, clearing merge state\n')
2949 self.ui.debug(b'nothing to commit, clearing merge state\n')
2935 ms.reset()
2950 ms.reset()
2936 return None
2951 return None
2937
2952
2938 if merge and cctx.deleted():
2953 if merge and cctx.deleted():
2939 raise error.Abort(_(b"cannot commit merge with missing files"))
2954 raise error.Abort(_(b"cannot commit merge with missing files"))
2940
2955
2941 if editor:
2956 if editor:
2942 cctx._text = editor(self, cctx, subs)
2957 cctx._text = editor(self, cctx, subs)
2943 edited = text != cctx._text
2958 edited = text != cctx._text
2944
2959
2945 # Save commit message in case this transaction gets rolled back
2960 # Save commit message in case this transaction gets rolled back
2946 # (e.g. by a pretxncommit hook). Leave the content alone on
2961 # (e.g. by a pretxncommit hook). Leave the content alone on
2947 # the assumption that the user will use the same editor again.
2962 # the assumption that the user will use the same editor again.
2948 msgfn = self.savecommitmessage(cctx._text)
2963 msgfn = self.savecommitmessage(cctx._text)
2949
2964
2950 # commit subs and write new state
2965 # commit subs and write new state
2951 if subs:
2966 if subs:
2952 uipathfn = scmutil.getuipathfn(self)
2967 uipathfn = scmutil.getuipathfn(self)
2953 for s in sorted(commitsubs):
2968 for s in sorted(commitsubs):
2954 sub = wctx.sub(s)
2969 sub = wctx.sub(s)
2955 self.ui.status(
2970 self.ui.status(
2956 _(b'committing subrepository %s\n')
2971 _(b'committing subrepository %s\n')
2957 % uipathfn(subrepoutil.subrelpath(sub))
2972 % uipathfn(subrepoutil.subrelpath(sub))
2958 )
2973 )
2959 sr = sub.commit(cctx._text, user, date)
2974 sr = sub.commit(cctx._text, user, date)
2960 newstate[s] = (newstate[s][0], sr)
2975 newstate[s] = (newstate[s][0], sr)
2961 subrepoutil.writestate(self, newstate)
2976 subrepoutil.writestate(self, newstate)
2962
2977
2963 p1, p2 = self.dirstate.parents()
2978 p1, p2 = self.dirstate.parents()
2964 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2979 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2965 try:
2980 try:
2966 self.hook(
2981 self.hook(
2967 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2982 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2968 )
2983 )
2969 with self.transaction(b'commit'):
2984 with self.transaction(b'commit'):
2970 ret = self.commitctx(cctx, True)
2985 ret = self.commitctx(cctx, True)
2971 # update bookmarks, dirstate and mergestate
2986 # update bookmarks, dirstate and mergestate
2972 bookmarks.update(self, [p1, p2], ret)
2987 bookmarks.update(self, [p1, p2], ret)
2973 cctx.markcommitted(ret)
2988 cctx.markcommitted(ret)
2974 ms.reset()
2989 ms.reset()
2975 except: # re-raises
2990 except: # re-raises
2976 if edited:
2991 if edited:
2977 self.ui.write(
2992 self.ui.write(
2978 _(b'note: commit message saved in %s\n') % msgfn
2993 _(b'note: commit message saved in %s\n') % msgfn
2979 )
2994 )
2980 self.ui.write(
2995 self.ui.write(
2981 _(
2996 _(
2982 b"note: use 'hg commit --logfile "
2997 b"note: use 'hg commit --logfile "
2983 b".hg/last-message.txt --edit' to reuse it\n"
2998 b".hg/last-message.txt --edit' to reuse it\n"
2984 )
2999 )
2985 )
3000 )
2986 raise
3001 raise
2987
3002
2988 def commithook(unused_success):
3003 def commithook(unused_success):
2989 # hack for command that use a temporary commit (eg: histedit)
3004 # hack for command that use a temporary commit (eg: histedit)
2990 # temporary commit got stripped before hook release
3005 # temporary commit got stripped before hook release
2991 if self.changelog.hasnode(ret):
3006 if self.changelog.hasnode(ret):
2992 self.hook(
3007 self.hook(
2993 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3008 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
2994 )
3009 )
2995
3010
2996 self._afterlock(commithook)
3011 self._afterlock(commithook)
2997 return ret
3012 return ret
2998
3013
2999 @unfilteredmethod
3014 @unfilteredmethod
3000 def commitctx(self, ctx, error=False, origctx=None):
3015 def commitctx(self, ctx, error=False, origctx=None):
3001 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3016 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3002
3017
3003 @unfilteredmethod
3018 @unfilteredmethod
3004 def destroying(self):
3019 def destroying(self):
3005 """Inform the repository that nodes are about to be destroyed.
3020 """Inform the repository that nodes are about to be destroyed.
3006 Intended for use by strip and rollback, so there's a common
3021 Intended for use by strip and rollback, so there's a common
3007 place for anything that has to be done before destroying history.
3022 place for anything that has to be done before destroying history.
3008
3023
3009 This is mostly useful for saving state that is in memory and waiting
3024 This is mostly useful for saving state that is in memory and waiting
3010 to be flushed when the current lock is released. Because a call to
3025 to be flushed when the current lock is released. Because a call to
3011 destroyed is imminent, the repo will be invalidated causing those
3026 destroyed is imminent, the repo will be invalidated causing those
3012 changes to stay in memory (waiting for the next unlock), or vanish
3027 changes to stay in memory (waiting for the next unlock), or vanish
3013 completely.
3028 completely.
3014 """
3029 """
3015 # When using the same lock to commit and strip, the phasecache is left
3030 # When using the same lock to commit and strip, the phasecache is left
3016 # dirty after committing. Then when we strip, the repo is invalidated,
3031 # dirty after committing. Then when we strip, the repo is invalidated,
3017 # causing those changes to disappear.
3032 # causing those changes to disappear.
3018 if '_phasecache' in vars(self):
3033 if '_phasecache' in vars(self):
3019 self._phasecache.write()
3034 self._phasecache.write()
3020
3035
3021 @unfilteredmethod
3036 @unfilteredmethod
3022 def destroyed(self):
3037 def destroyed(self):
3023 """Inform the repository that nodes have been destroyed.
3038 """Inform the repository that nodes have been destroyed.
3024 Intended for use by strip and rollback, so there's a common
3039 Intended for use by strip and rollback, so there's a common
3025 place for anything that has to be done after destroying history.
3040 place for anything that has to be done after destroying history.
3026 """
3041 """
3027 # When one tries to:
3042 # When one tries to:
3028 # 1) destroy nodes thus calling this method (e.g. strip)
3043 # 1) destroy nodes thus calling this method (e.g. strip)
3029 # 2) use phasecache somewhere (e.g. commit)
3044 # 2) use phasecache somewhere (e.g. commit)
3030 #
3045 #
3031 # then 2) will fail because the phasecache contains nodes that were
3046 # then 2) will fail because the phasecache contains nodes that were
3032 # removed. We can either remove phasecache from the filecache,
3047 # removed. We can either remove phasecache from the filecache,
3033 # causing it to reload next time it is accessed, or simply filter
3048 # causing it to reload next time it is accessed, or simply filter
3034 # the removed nodes now and write the updated cache.
3049 # the removed nodes now and write the updated cache.
3035 self._phasecache.filterunknown(self)
3050 self._phasecache.filterunknown(self)
3036 self._phasecache.write()
3051 self._phasecache.write()
3037
3052
3038 # refresh all repository caches
3053 # refresh all repository caches
3039 self.updatecaches()
3054 self.updatecaches()
3040
3055
3041 # Ensure the persistent tag cache is updated. Doing it now
3056 # Ensure the persistent tag cache is updated. Doing it now
3042 # means that the tag cache only has to worry about destroyed
3057 # means that the tag cache only has to worry about destroyed
3043 # heads immediately after a strip/rollback. That in turn
3058 # heads immediately after a strip/rollback. That in turn
3044 # guarantees that "cachetip == currenttip" (comparing both rev
3059 # guarantees that "cachetip == currenttip" (comparing both rev
3045 # and node) always means no nodes have been added or destroyed.
3060 # and node) always means no nodes have been added or destroyed.
3046
3061
3047 # XXX this is suboptimal when qrefresh'ing: we strip the current
3062 # XXX this is suboptimal when qrefresh'ing: we strip the current
3048 # head, refresh the tag cache, then immediately add a new head.
3063 # head, refresh the tag cache, then immediately add a new head.
3049 # But I think doing it this way is necessary for the "instant
3064 # But I think doing it this way is necessary for the "instant
3050 # tag cache retrieval" case to work.
3065 # tag cache retrieval" case to work.
3051 self.invalidate()
3066 self.invalidate()
3052
3067
3053 def status(
3068 def status(
3054 self,
3069 self,
3055 node1=b'.',
3070 node1=b'.',
3056 node2=None,
3071 node2=None,
3057 match=None,
3072 match=None,
3058 ignored=False,
3073 ignored=False,
3059 clean=False,
3074 clean=False,
3060 unknown=False,
3075 unknown=False,
3061 listsubrepos=False,
3076 listsubrepos=False,
3062 ):
3077 ):
3063 '''a convenience method that calls node1.status(node2)'''
3078 '''a convenience method that calls node1.status(node2)'''
3064 return self[node1].status(
3079 return self[node1].status(
3065 node2, match, ignored, clean, unknown, listsubrepos
3080 node2, match, ignored, clean, unknown, listsubrepos
3066 )
3081 )
3067
3082
3068 def addpostdsstatus(self, ps):
3083 def addpostdsstatus(self, ps):
3069 """Add a callback to run within the wlock, at the point at which status
3084 """Add a callback to run within the wlock, at the point at which status
3070 fixups happen.
3085 fixups happen.
3071
3086
3072 On status completion, callback(wctx, status) will be called with the
3087 On status completion, callback(wctx, status) will be called with the
3073 wlock held, unless the dirstate has changed from underneath or the wlock
3088 wlock held, unless the dirstate has changed from underneath or the wlock
3074 couldn't be grabbed.
3089 couldn't be grabbed.
3075
3090
3076 Callbacks should not capture and use a cached copy of the dirstate --
3091 Callbacks should not capture and use a cached copy of the dirstate --
3077 it might change in the meanwhile. Instead, they should access the
3092 it might change in the meanwhile. Instead, they should access the
3078 dirstate via wctx.repo().dirstate.
3093 dirstate via wctx.repo().dirstate.
3079
3094
3080 This list is emptied out after each status run -- extensions should
3095 This list is emptied out after each status run -- extensions should
3081 make sure it adds to this list each time dirstate.status is called.
3096 make sure it adds to this list each time dirstate.status is called.
3082 Extensions should also make sure they don't call this for statuses
3097 Extensions should also make sure they don't call this for statuses
3083 that don't involve the dirstate.
3098 that don't involve the dirstate.
3084 """
3099 """
3085
3100
3086 # The list is located here for uniqueness reasons -- it is actually
3101 # The list is located here for uniqueness reasons -- it is actually
3087 # managed by the workingctx, but that isn't unique per-repo.
3102 # managed by the workingctx, but that isn't unique per-repo.
3088 self._postdsstatus.append(ps)
3103 self._postdsstatus.append(ps)
3089
3104
3090 def postdsstatus(self):
3105 def postdsstatus(self):
3091 """Used by workingctx to get the list of post-dirstate-status hooks."""
3106 """Used by workingctx to get the list of post-dirstate-status hooks."""
3092 return self._postdsstatus
3107 return self._postdsstatus
3093
3108
3094 def clearpostdsstatus(self):
3109 def clearpostdsstatus(self):
3095 """Used by workingctx to clear post-dirstate-status hooks."""
3110 """Used by workingctx to clear post-dirstate-status hooks."""
3096 del self._postdsstatus[:]
3111 del self._postdsstatus[:]
3097
3112
3098 def heads(self, start=None):
3113 def heads(self, start=None):
3099 if start is None:
3114 if start is None:
3100 cl = self.changelog
3115 cl = self.changelog
3101 headrevs = reversed(cl.headrevs())
3116 headrevs = reversed(cl.headrevs())
3102 return [cl.node(rev) for rev in headrevs]
3117 return [cl.node(rev) for rev in headrevs]
3103
3118
3104 heads = self.changelog.heads(start)
3119 heads = self.changelog.heads(start)
3105 # sort the output in rev descending order
3120 # sort the output in rev descending order
3106 return sorted(heads, key=self.changelog.rev, reverse=True)
3121 return sorted(heads, key=self.changelog.rev, reverse=True)
3107
3122
3108 def branchheads(self, branch=None, start=None, closed=False):
3123 def branchheads(self, branch=None, start=None, closed=False):
3109 """return a (possibly filtered) list of heads for the given branch
3124 """return a (possibly filtered) list of heads for the given branch
3110
3125
3111 Heads are returned in topological order, from newest to oldest.
3126 Heads are returned in topological order, from newest to oldest.
3112 If branch is None, use the dirstate branch.
3127 If branch is None, use the dirstate branch.
3113 If start is not None, return only heads reachable from start.
3128 If start is not None, return only heads reachable from start.
3114 If closed is True, return heads that are marked as closed as well.
3129 If closed is True, return heads that are marked as closed as well.
3115 """
3130 """
3116 if branch is None:
3131 if branch is None:
3117 branch = self[None].branch()
3132 branch = self[None].branch()
3118 branches = self.branchmap()
3133 branches = self.branchmap()
3119 if not branches.hasbranch(branch):
3134 if not branches.hasbranch(branch):
3120 return []
3135 return []
3121 # the cache returns heads ordered lowest to highest
3136 # the cache returns heads ordered lowest to highest
3122 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3137 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3123 if start is not None:
3138 if start is not None:
3124 # filter out the heads that cannot be reached from startrev
3139 # filter out the heads that cannot be reached from startrev
3125 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3140 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3126 bheads = [h for h in bheads if h in fbheads]
3141 bheads = [h for h in bheads if h in fbheads]
3127 return bheads
3142 return bheads
3128
3143
3129 def branches(self, nodes):
3144 def branches(self, nodes):
3130 if not nodes:
3145 if not nodes:
3131 nodes = [self.changelog.tip()]
3146 nodes = [self.changelog.tip()]
3132 b = []
3147 b = []
3133 for n in nodes:
3148 for n in nodes:
3134 t = n
3149 t = n
3135 while True:
3150 while True:
3136 p = self.changelog.parents(n)
3151 p = self.changelog.parents(n)
3137 if p[1] != nullid or p[0] == nullid:
3152 if p[1] != nullid or p[0] == nullid:
3138 b.append((t, n, p[0], p[1]))
3153 b.append((t, n, p[0], p[1]))
3139 break
3154 break
3140 n = p[0]
3155 n = p[0]
3141 return b
3156 return b
3142
3157
3143 def between(self, pairs):
3158 def between(self, pairs):
3144 r = []
3159 r = []
3145
3160
3146 for top, bottom in pairs:
3161 for top, bottom in pairs:
3147 n, l, i = top, [], 0
3162 n, l, i = top, [], 0
3148 f = 1
3163 f = 1
3149
3164
3150 while n != bottom and n != nullid:
3165 while n != bottom and n != nullid:
3151 p = self.changelog.parents(n)[0]
3166 p = self.changelog.parents(n)[0]
3152 if i == f:
3167 if i == f:
3153 l.append(n)
3168 l.append(n)
3154 f = f * 2
3169 f = f * 2
3155 n = p
3170 n = p
3156 i += 1
3171 i += 1
3157
3172
3158 r.append(l)
3173 r.append(l)
3159
3174
3160 return r
3175 return r
3161
3176
3162 def checkpush(self, pushop):
3177 def checkpush(self, pushop):
3163 """Extensions can override this function if additional checks have
3178 """Extensions can override this function if additional checks have
3164 to be performed before pushing, or call it if they override push
3179 to be performed before pushing, or call it if they override push
3165 command.
3180 command.
3166 """
3181 """
3167
3182
3168 @unfilteredpropertycache
3183 @unfilteredpropertycache
3169 def prepushoutgoinghooks(self):
3184 def prepushoutgoinghooks(self):
3170 """Return util.hooks consists of a pushop with repo, remote, outgoing
3185 """Return util.hooks consists of a pushop with repo, remote, outgoing
3171 methods, which are called before pushing changesets.
3186 methods, which are called before pushing changesets.
3172 """
3187 """
3173 return util.hooks()
3188 return util.hooks()
3174
3189
3175 def pushkey(self, namespace, key, old, new):
3190 def pushkey(self, namespace, key, old, new):
3176 try:
3191 try:
3177 tr = self.currenttransaction()
3192 tr = self.currenttransaction()
3178 hookargs = {}
3193 hookargs = {}
3179 if tr is not None:
3194 if tr is not None:
3180 hookargs.update(tr.hookargs)
3195 hookargs.update(tr.hookargs)
3181 hookargs = pycompat.strkwargs(hookargs)
3196 hookargs = pycompat.strkwargs(hookargs)
3182 hookargs['namespace'] = namespace
3197 hookargs['namespace'] = namespace
3183 hookargs['key'] = key
3198 hookargs['key'] = key
3184 hookargs['old'] = old
3199 hookargs['old'] = old
3185 hookargs['new'] = new
3200 hookargs['new'] = new
3186 self.hook(b'prepushkey', throw=True, **hookargs)
3201 self.hook(b'prepushkey', throw=True, **hookargs)
3187 except error.HookAbort as exc:
3202 except error.HookAbort as exc:
3188 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3203 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3189 if exc.hint:
3204 if exc.hint:
3190 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3205 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3191 return False
3206 return False
3192 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3207 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3193 ret = pushkey.push(self, namespace, key, old, new)
3208 ret = pushkey.push(self, namespace, key, old, new)
3194
3209
3195 def runhook(unused_success):
3210 def runhook(unused_success):
3196 self.hook(
3211 self.hook(
3197 b'pushkey',
3212 b'pushkey',
3198 namespace=namespace,
3213 namespace=namespace,
3199 key=key,
3214 key=key,
3200 old=old,
3215 old=old,
3201 new=new,
3216 new=new,
3202 ret=ret,
3217 ret=ret,
3203 )
3218 )
3204
3219
3205 self._afterlock(runhook)
3220 self._afterlock(runhook)
3206 return ret
3221 return ret
3207
3222
3208 def listkeys(self, namespace):
3223 def listkeys(self, namespace):
3209 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3224 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3210 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3225 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3211 values = pushkey.list(self, namespace)
3226 values = pushkey.list(self, namespace)
3212 self.hook(b'listkeys', namespace=namespace, values=values)
3227 self.hook(b'listkeys', namespace=namespace, values=values)
3213 return values
3228 return values
3214
3229
3215 def debugwireargs(self, one, two, three=None, four=None, five=None):
3230 def debugwireargs(self, one, two, three=None, four=None, five=None):
3216 '''used to test argument passing over the wire'''
3231 '''used to test argument passing over the wire'''
3217 return b"%s %s %s %s %s" % (
3232 return b"%s %s %s %s %s" % (
3218 one,
3233 one,
3219 two,
3234 two,
3220 pycompat.bytestr(three),
3235 pycompat.bytestr(three),
3221 pycompat.bytestr(four),
3236 pycompat.bytestr(four),
3222 pycompat.bytestr(five),
3237 pycompat.bytestr(five),
3223 )
3238 )
3224
3239
3225 def savecommitmessage(self, text):
3240 def savecommitmessage(self, text):
3226 fp = self.vfs(b'last-message.txt', b'wb')
3241 fp = self.vfs(b'last-message.txt', b'wb')
3227 try:
3242 try:
3228 fp.write(text)
3243 fp.write(text)
3229 finally:
3244 finally:
3230 fp.close()
3245 fp.close()
3231 return self.pathto(fp.name[len(self.root) + 1 :])
3246 return self.pathto(fp.name[len(self.root) + 1 :])
3232
3247
3233
3248
3234 # used to avoid circular references so destructors work
3249 # used to avoid circular references so destructors work
3235 def aftertrans(files):
3250 def aftertrans(files):
3236 renamefiles = [tuple(t) for t in files]
3251 renamefiles = [tuple(t) for t in files]
3237
3252
3238 def a():
3253 def a():
3239 for vfs, src, dest in renamefiles:
3254 for vfs, src, dest in renamefiles:
3240 # if src and dest refer to a same file, vfs.rename is a no-op,
3255 # if src and dest refer to a same file, vfs.rename is a no-op,
3241 # leaving both src and dest on disk. delete dest to make sure
3256 # leaving both src and dest on disk. delete dest to make sure
3242 # the rename couldn't be such a no-op.
3257 # the rename couldn't be such a no-op.
3243 vfs.tryunlink(dest)
3258 vfs.tryunlink(dest)
3244 try:
3259 try:
3245 vfs.rename(src, dest)
3260 vfs.rename(src, dest)
3246 except OSError: # journal file does not yet exist
3261 except OSError: # journal file does not yet exist
3247 pass
3262 pass
3248
3263
3249 return a
3264 return a
3250
3265
3251
3266
3252 def undoname(fn):
3267 def undoname(fn):
3253 base, name = os.path.split(fn)
3268 base, name = os.path.split(fn)
3254 assert name.startswith(b'journal')
3269 assert name.startswith(b'journal')
3255 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3270 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3256
3271
3257
3272
3258 def instance(ui, path, create, intents=None, createopts=None):
3273 def instance(ui, path, create, intents=None, createopts=None):
3259 localpath = util.urllocalpath(path)
3274 localpath = util.urllocalpath(path)
3260 if create:
3275 if create:
3261 createrepository(ui, localpath, createopts=createopts)
3276 createrepository(ui, localpath, createopts=createopts)
3262
3277
3263 return makelocalrepository(ui, localpath, intents=intents)
3278 return makelocalrepository(ui, localpath, intents=intents)
3264
3279
3265
3280
3266 def islocal(path):
3281 def islocal(path):
3267 return True
3282 return True
3268
3283
3269
3284
3270 def defaultcreateopts(ui, createopts=None):
3285 def defaultcreateopts(ui, createopts=None):
3271 """Populate the default creation options for a repository.
3286 """Populate the default creation options for a repository.
3272
3287
3273 A dictionary of explicitly requested creation options can be passed
3288 A dictionary of explicitly requested creation options can be passed
3274 in. Missing keys will be populated.
3289 in. Missing keys will be populated.
3275 """
3290 """
3276 createopts = dict(createopts or {})
3291 createopts = dict(createopts or {})
3277
3292
3278 if b'backend' not in createopts:
3293 if b'backend' not in createopts:
3279 # experimental config: storage.new-repo-backend
3294 # experimental config: storage.new-repo-backend
3280 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3295 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3281
3296
3282 return createopts
3297 return createopts
3283
3298
3284
3299
3285 def newreporequirements(ui, createopts):
3300 def newreporequirements(ui, createopts):
3286 """Determine the set of requirements for a new local repository.
3301 """Determine the set of requirements for a new local repository.
3287
3302
3288 Extensions can wrap this function to specify custom requirements for
3303 Extensions can wrap this function to specify custom requirements for
3289 new repositories.
3304 new repositories.
3290 """
3305 """
3291 # If the repo is being created from a shared repository, we copy
3306 # If the repo is being created from a shared repository, we copy
3292 # its requirements.
3307 # its requirements.
3293 if b'sharedrepo' in createopts:
3308 if b'sharedrepo' in createopts:
3294 requirements = set(createopts[b'sharedrepo'].requirements)
3309 requirements = set(createopts[b'sharedrepo'].requirements)
3295 if createopts.get(b'sharedrelative'):
3310 if createopts.get(b'sharedrelative'):
3296 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3311 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3297 else:
3312 else:
3298 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3313 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3299
3314
3300 return requirements
3315 return requirements
3301
3316
3302 if b'backend' not in createopts:
3317 if b'backend' not in createopts:
3303 raise error.ProgrammingError(
3318 raise error.ProgrammingError(
3304 b'backend key not present in createopts; '
3319 b'backend key not present in createopts; '
3305 b'was defaultcreateopts() called?'
3320 b'was defaultcreateopts() called?'
3306 )
3321 )
3307
3322
3308 if createopts[b'backend'] != b'revlogv1':
3323 if createopts[b'backend'] != b'revlogv1':
3309 raise error.Abort(
3324 raise error.Abort(
3310 _(
3325 _(
3311 b'unable to determine repository requirements for '
3326 b'unable to determine repository requirements for '
3312 b'storage backend: %s'
3327 b'storage backend: %s'
3313 )
3328 )
3314 % createopts[b'backend']
3329 % createopts[b'backend']
3315 )
3330 )
3316
3331
3317 requirements = {b'revlogv1'}
3332 requirements = {b'revlogv1'}
3318 if ui.configbool(b'format', b'usestore'):
3333 if ui.configbool(b'format', b'usestore'):
3319 requirements.add(b'store')
3334 requirements.add(b'store')
3320 if ui.configbool(b'format', b'usefncache'):
3335 if ui.configbool(b'format', b'usefncache'):
3321 requirements.add(b'fncache')
3336 requirements.add(b'fncache')
3322 if ui.configbool(b'format', b'dotencode'):
3337 if ui.configbool(b'format', b'dotencode'):
3323 requirements.add(b'dotencode')
3338 requirements.add(b'dotencode')
3324
3339
3325 compengines = ui.configlist(b'format', b'revlog-compression')
3340 compengines = ui.configlist(b'format', b'revlog-compression')
3326 for compengine in compengines:
3341 for compengine in compengines:
3327 if compengine in util.compengines:
3342 if compengine in util.compengines:
3328 break
3343 break
3329 else:
3344 else:
3330 raise error.Abort(
3345 raise error.Abort(
3331 _(
3346 _(
3332 b'compression engines %s defined by '
3347 b'compression engines %s defined by '
3333 b'format.revlog-compression not available'
3348 b'format.revlog-compression not available'
3334 )
3349 )
3335 % b', '.join(b'"%s"' % e for e in compengines),
3350 % b', '.join(b'"%s"' % e for e in compengines),
3336 hint=_(
3351 hint=_(
3337 b'run "hg debuginstall" to list available '
3352 b'run "hg debuginstall" to list available '
3338 b'compression engines'
3353 b'compression engines'
3339 ),
3354 ),
3340 )
3355 )
3341
3356
3342 # zlib is the historical default and doesn't need an explicit requirement.
3357 # zlib is the historical default and doesn't need an explicit requirement.
3343 if compengine == b'zstd':
3358 if compengine == b'zstd':
3344 requirements.add(b'revlog-compression-zstd')
3359 requirements.add(b'revlog-compression-zstd')
3345 elif compengine != b'zlib':
3360 elif compengine != b'zlib':
3346 requirements.add(b'exp-compression-%s' % compengine)
3361 requirements.add(b'exp-compression-%s' % compengine)
3347
3362
3348 if scmutil.gdinitconfig(ui):
3363 if scmutil.gdinitconfig(ui):
3349 requirements.add(b'generaldelta')
3364 requirements.add(b'generaldelta')
3350 if ui.configbool(b'format', b'sparse-revlog'):
3365 if ui.configbool(b'format', b'sparse-revlog'):
3351 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3366 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3352
3367
3353 # experimental config: format.exp-use-side-data
3368 # experimental config: format.exp-use-side-data
3354 if ui.configbool(b'format', b'exp-use-side-data'):
3369 if ui.configbool(b'format', b'exp-use-side-data'):
3355 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3370 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3356 # experimental config: format.exp-use-copies-side-data-changeset
3371 # experimental config: format.exp-use-copies-side-data-changeset
3357 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3372 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3358 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3373 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3359 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3374 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3360 if ui.configbool(b'experimental', b'treemanifest'):
3375 if ui.configbool(b'experimental', b'treemanifest'):
3361 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3376 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3362
3377
3363 revlogv2 = ui.config(b'experimental', b'revlogv2')
3378 revlogv2 = ui.config(b'experimental', b'revlogv2')
3364 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3379 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3365 requirements.remove(b'revlogv1')
3380 requirements.remove(b'revlogv1')
3366 # generaldelta is implied by revlogv2.
3381 # generaldelta is implied by revlogv2.
3367 requirements.discard(b'generaldelta')
3382 requirements.discard(b'generaldelta')
3368 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3383 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3369 # experimental config: format.internal-phase
3384 # experimental config: format.internal-phase
3370 if ui.configbool(b'format', b'internal-phase'):
3385 if ui.configbool(b'format', b'internal-phase'):
3371 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3386 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3372
3387
3373 if createopts.get(b'narrowfiles'):
3388 if createopts.get(b'narrowfiles'):
3374 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3389 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3375
3390
3376 if createopts.get(b'lfs'):
3391 if createopts.get(b'lfs'):
3377 requirements.add(b'lfs')
3392 requirements.add(b'lfs')
3378
3393
3379 if ui.configbool(b'format', b'bookmarks-in-store'):
3394 if ui.configbool(b'format', b'bookmarks-in-store'):
3380 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3395 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3381
3396
3382 if ui.configbool(b'format', b'use-persistent-nodemap'):
3397 if ui.configbool(b'format', b'use-persistent-nodemap'):
3383 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3398 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3384
3399
3385 # if share-safe is enabled, let's create the new repository with the new
3400 # if share-safe is enabled, let's create the new repository with the new
3386 # requirement
3401 # requirement
3387 if ui.configbool(b'format', b'exp-share-safe'):
3402 if ui.configbool(b'format', b'exp-share-safe'):
3388 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3403 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3389
3404
3390 return requirements
3405 return requirements
3391
3406
3392
3407
3393 def checkrequirementscompat(ui, requirements):
3408 def checkrequirementscompat(ui, requirements):
3394 """Checks compatibility of repository requirements enabled and disabled.
3409 """Checks compatibility of repository requirements enabled and disabled.
3395
3410
3396 Returns a set of requirements which needs to be dropped because dependend
3411 Returns a set of requirements which needs to be dropped because dependend
3397 requirements are not enabled. Also warns users about it"""
3412 requirements are not enabled. Also warns users about it"""
3398
3413
3399 dropped = set()
3414 dropped = set()
3400
3415
3401 if b'store' not in requirements:
3416 if b'store' not in requirements:
3402 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3417 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3403 ui.warn(
3418 ui.warn(
3404 _(
3419 _(
3405 b'ignoring enabled \'format.bookmarks-in-store\' config '
3420 b'ignoring enabled \'format.bookmarks-in-store\' config '
3406 b'beacuse it is incompatible with disabled '
3421 b'beacuse it is incompatible with disabled '
3407 b'\'format.usestore\' config\n'
3422 b'\'format.usestore\' config\n'
3408 )
3423 )
3409 )
3424 )
3410 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3425 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3411
3426
3412 if (
3427 if (
3413 requirementsmod.SHARED_REQUIREMENT in requirements
3428 requirementsmod.SHARED_REQUIREMENT in requirements
3414 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3429 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3415 ):
3430 ):
3416 raise error.Abort(
3431 raise error.Abort(
3417 _(
3432 _(
3418 b"cannot create shared repository as source was created"
3433 b"cannot create shared repository as source was created"
3419 b" with 'format.usestore' config disabled"
3434 b" with 'format.usestore' config disabled"
3420 )
3435 )
3421 )
3436 )
3422
3437
3423 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3438 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3424 ui.warn(
3439 ui.warn(
3425 _(
3440 _(
3426 b"ignoring enabled 'format.exp-share-safe' config because "
3441 b"ignoring enabled 'format.exp-share-safe' config because "
3427 b"it is incompatible with disabled 'format.usestore'"
3442 b"it is incompatible with disabled 'format.usestore'"
3428 b" config\n"
3443 b" config\n"
3429 )
3444 )
3430 )
3445 )
3431 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3446 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3432
3447
3433 return dropped
3448 return dropped
3434
3449
3435
3450
3436 def filterknowncreateopts(ui, createopts):
3451 def filterknowncreateopts(ui, createopts):
3437 """Filters a dict of repo creation options against options that are known.
3452 """Filters a dict of repo creation options against options that are known.
3438
3453
3439 Receives a dict of repo creation options and returns a dict of those
3454 Receives a dict of repo creation options and returns a dict of those
3440 options that we don't know how to handle.
3455 options that we don't know how to handle.
3441
3456
3442 This function is called as part of repository creation. If the
3457 This function is called as part of repository creation. If the
3443 returned dict contains any items, repository creation will not
3458 returned dict contains any items, repository creation will not
3444 be allowed, as it means there was a request to create a repository
3459 be allowed, as it means there was a request to create a repository
3445 with options not recognized by loaded code.
3460 with options not recognized by loaded code.
3446
3461
3447 Extensions can wrap this function to filter out creation options
3462 Extensions can wrap this function to filter out creation options
3448 they know how to handle.
3463 they know how to handle.
3449 """
3464 """
3450 known = {
3465 known = {
3451 b'backend',
3466 b'backend',
3452 b'lfs',
3467 b'lfs',
3453 b'narrowfiles',
3468 b'narrowfiles',
3454 b'sharedrepo',
3469 b'sharedrepo',
3455 b'sharedrelative',
3470 b'sharedrelative',
3456 b'shareditems',
3471 b'shareditems',
3457 b'shallowfilestore',
3472 b'shallowfilestore',
3458 }
3473 }
3459
3474
3460 return {k: v for k, v in createopts.items() if k not in known}
3475 return {k: v for k, v in createopts.items() if k not in known}
3461
3476
3462
3477
3463 def createrepository(ui, path, createopts=None):
3478 def createrepository(ui, path, createopts=None):
3464 """Create a new repository in a vfs.
3479 """Create a new repository in a vfs.
3465
3480
3466 ``path`` path to the new repo's working directory.
3481 ``path`` path to the new repo's working directory.
3467 ``createopts`` options for the new repository.
3482 ``createopts`` options for the new repository.
3468
3483
3469 The following keys for ``createopts`` are recognized:
3484 The following keys for ``createopts`` are recognized:
3470
3485
3471 backend
3486 backend
3472 The storage backend to use.
3487 The storage backend to use.
3473 lfs
3488 lfs
3474 Repository will be created with ``lfs`` requirement. The lfs extension
3489 Repository will be created with ``lfs`` requirement. The lfs extension
3475 will automatically be loaded when the repository is accessed.
3490 will automatically be loaded when the repository is accessed.
3476 narrowfiles
3491 narrowfiles
3477 Set up repository to support narrow file storage.
3492 Set up repository to support narrow file storage.
3478 sharedrepo
3493 sharedrepo
3479 Repository object from which storage should be shared.
3494 Repository object from which storage should be shared.
3480 sharedrelative
3495 sharedrelative
3481 Boolean indicating if the path to the shared repo should be
3496 Boolean indicating if the path to the shared repo should be
3482 stored as relative. By default, the pointer to the "parent" repo
3497 stored as relative. By default, the pointer to the "parent" repo
3483 is stored as an absolute path.
3498 is stored as an absolute path.
3484 shareditems
3499 shareditems
3485 Set of items to share to the new repository (in addition to storage).
3500 Set of items to share to the new repository (in addition to storage).
3486 shallowfilestore
3501 shallowfilestore
3487 Indicates that storage for files should be shallow (not all ancestor
3502 Indicates that storage for files should be shallow (not all ancestor
3488 revisions are known).
3503 revisions are known).
3489 """
3504 """
3490 createopts = defaultcreateopts(ui, createopts=createopts)
3505 createopts = defaultcreateopts(ui, createopts=createopts)
3491
3506
3492 unknownopts = filterknowncreateopts(ui, createopts)
3507 unknownopts = filterknowncreateopts(ui, createopts)
3493
3508
3494 if not isinstance(unknownopts, dict):
3509 if not isinstance(unknownopts, dict):
3495 raise error.ProgrammingError(
3510 raise error.ProgrammingError(
3496 b'filterknowncreateopts() did not return a dict'
3511 b'filterknowncreateopts() did not return a dict'
3497 )
3512 )
3498
3513
3499 if unknownopts:
3514 if unknownopts:
3500 raise error.Abort(
3515 raise error.Abort(
3501 _(
3516 _(
3502 b'unable to create repository because of unknown '
3517 b'unable to create repository because of unknown '
3503 b'creation option: %s'
3518 b'creation option: %s'
3504 )
3519 )
3505 % b', '.join(sorted(unknownopts)),
3520 % b', '.join(sorted(unknownopts)),
3506 hint=_(b'is a required extension not loaded?'),
3521 hint=_(b'is a required extension not loaded?'),
3507 )
3522 )
3508
3523
3509 requirements = newreporequirements(ui, createopts=createopts)
3524 requirements = newreporequirements(ui, createopts=createopts)
3510 requirements -= checkrequirementscompat(ui, requirements)
3525 requirements -= checkrequirementscompat(ui, requirements)
3511
3526
3512 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3527 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3513
3528
3514 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3529 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3515 if hgvfs.exists():
3530 if hgvfs.exists():
3516 raise error.RepoError(_(b'repository %s already exists') % path)
3531 raise error.RepoError(_(b'repository %s already exists') % path)
3517
3532
3518 if b'sharedrepo' in createopts:
3533 if b'sharedrepo' in createopts:
3519 sharedpath = createopts[b'sharedrepo'].sharedpath
3534 sharedpath = createopts[b'sharedrepo'].sharedpath
3520
3535
3521 if createopts.get(b'sharedrelative'):
3536 if createopts.get(b'sharedrelative'):
3522 try:
3537 try:
3523 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3538 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3524 except (IOError, ValueError) as e:
3539 except (IOError, ValueError) as e:
3525 # ValueError is raised on Windows if the drive letters differ
3540 # ValueError is raised on Windows if the drive letters differ
3526 # on each path.
3541 # on each path.
3527 raise error.Abort(
3542 raise error.Abort(
3528 _(b'cannot calculate relative path'),
3543 _(b'cannot calculate relative path'),
3529 hint=stringutil.forcebytestr(e),
3544 hint=stringutil.forcebytestr(e),
3530 )
3545 )
3531
3546
3532 if not wdirvfs.exists():
3547 if not wdirvfs.exists():
3533 wdirvfs.makedirs()
3548 wdirvfs.makedirs()
3534
3549
3535 hgvfs.makedir(notindexed=True)
3550 hgvfs.makedir(notindexed=True)
3536 if b'sharedrepo' not in createopts:
3551 if b'sharedrepo' not in createopts:
3537 hgvfs.mkdir(b'cache')
3552 hgvfs.mkdir(b'cache')
3538 hgvfs.mkdir(b'wcache')
3553 hgvfs.mkdir(b'wcache')
3539
3554
3540 if b'store' in requirements and b'sharedrepo' not in createopts:
3555 if b'store' in requirements and b'sharedrepo' not in createopts:
3541 hgvfs.mkdir(b'store')
3556 hgvfs.mkdir(b'store')
3542
3557
3543 # We create an invalid changelog outside the store so very old
3558 # We create an invalid changelog outside the store so very old
3544 # Mercurial versions (which didn't know about the requirements
3559 # Mercurial versions (which didn't know about the requirements
3545 # file) encounter an error on reading the changelog. This
3560 # file) encounter an error on reading the changelog. This
3546 # effectively locks out old clients and prevents them from
3561 # effectively locks out old clients and prevents them from
3547 # mucking with a repo in an unknown format.
3562 # mucking with a repo in an unknown format.
3548 #
3563 #
3549 # The revlog header has version 2, which won't be recognized by
3564 # The revlog header has version 2, which won't be recognized by
3550 # such old clients.
3565 # such old clients.
3551 hgvfs.append(
3566 hgvfs.append(
3552 b'00changelog.i',
3567 b'00changelog.i',
3553 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3568 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3554 b'layout',
3569 b'layout',
3555 )
3570 )
3556
3571
3557 # Filter the requirements into working copy and store ones
3572 # Filter the requirements into working copy and store ones
3558 wcreq, storereq = scmutil.filterrequirements(requirements)
3573 wcreq, storereq = scmutil.filterrequirements(requirements)
3559 # write working copy ones
3574 # write working copy ones
3560 scmutil.writerequires(hgvfs, wcreq)
3575 scmutil.writerequires(hgvfs, wcreq)
3561 # If there are store requirements and the current repository
3576 # If there are store requirements and the current repository
3562 # is not a shared one, write stored requirements
3577 # is not a shared one, write stored requirements
3563 # For new shared repository, we don't need to write the store
3578 # For new shared repository, we don't need to write the store
3564 # requirements as they are already present in store requires
3579 # requirements as they are already present in store requires
3565 if storereq and b'sharedrepo' not in createopts:
3580 if storereq and b'sharedrepo' not in createopts:
3566 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3581 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3567 scmutil.writerequires(storevfs, storereq)
3582 scmutil.writerequires(storevfs, storereq)
3568
3583
3569 # Write out file telling readers where to find the shared store.
3584 # Write out file telling readers where to find the shared store.
3570 if b'sharedrepo' in createopts:
3585 if b'sharedrepo' in createopts:
3571 hgvfs.write(b'sharedpath', sharedpath)
3586 hgvfs.write(b'sharedpath', sharedpath)
3572
3587
3573 if createopts.get(b'shareditems'):
3588 if createopts.get(b'shareditems'):
3574 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3589 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3575 hgvfs.write(b'shared', shared)
3590 hgvfs.write(b'shared', shared)
3576
3591
3577
3592
3578 def poisonrepository(repo):
3593 def poisonrepository(repo):
3579 """Poison a repository instance so it can no longer be used."""
3594 """Poison a repository instance so it can no longer be used."""
3580 # Perform any cleanup on the instance.
3595 # Perform any cleanup on the instance.
3581 repo.close()
3596 repo.close()
3582
3597
3583 # Our strategy is to replace the type of the object with one that
3598 # Our strategy is to replace the type of the object with one that
3584 # has all attribute lookups result in error.
3599 # has all attribute lookups result in error.
3585 #
3600 #
3586 # But we have to allow the close() method because some constructors
3601 # But we have to allow the close() method because some constructors
3587 # of repos call close() on repo references.
3602 # of repos call close() on repo references.
3588 class poisonedrepository(object):
3603 class poisonedrepository(object):
3589 def __getattribute__(self, item):
3604 def __getattribute__(self, item):
3590 if item == 'close':
3605 if item == 'close':
3591 return object.__getattribute__(self, item)
3606 return object.__getattribute__(self, item)
3592
3607
3593 raise error.ProgrammingError(
3608 raise error.ProgrammingError(
3594 b'repo instances should not be used after unshare'
3609 b'repo instances should not be used after unshare'
3595 )
3610 )
3596
3611
3597 def close(self):
3612 def close(self):
3598 pass
3613 pass
3599
3614
3600 # We may have a repoview, which intercepts __setattr__. So be sure
3615 # We may have a repoview, which intercepts __setattr__. So be sure
3601 # we operate at the lowest level possible.
3616 # we operate at the lowest level possible.
3602 object.__setattr__(repo, '__class__', poisonedrepository)
3617 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,266 +1,297 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from . import (
11 from . import (
12 error,
12 error,
13 hg,
13 hg,
14 localrepo,
14 localrepo,
15 lock as lockmod,
15 lock as lockmod,
16 pycompat,
16 pycompat,
17 requirements as requirementsmod,
17 requirements as requirementsmod,
18 scmutil,
18 scmutil,
19 )
19 )
20
20
21 from .upgrade_utils import (
21 from .upgrade_utils import (
22 actions as upgrade_actions,
22 actions as upgrade_actions,
23 engine as upgrade_engine,
23 engine as upgrade_engine,
24 )
24 )
25
25
26 from .utils import (
26 from .utils import (
27 stringutil,
27 stringutil,
28 )
28 )
29
29
30 allformatvariant = upgrade_actions.allformatvariant
30 allformatvariant = upgrade_actions.allformatvariant
31
31
32
32
33 def upgraderepo(
33 def upgraderepo(
34 ui,
34 ui,
35 repo,
35 repo,
36 run=False,
36 run=False,
37 optimize=None,
37 optimize=None,
38 backup=True,
38 backup=True,
39 manifest=None,
39 manifest=None,
40 changelog=None,
40 changelog=None,
41 filelogs=None,
41 filelogs=None,
42 ):
42 ):
43 """Upgrade a repository in place."""
43 """Upgrade a repository in place."""
44 if optimize is None:
44 if optimize is None:
45 optimize = {}
45 optimize = {}
46 repo = repo.unfiltered()
46 repo = repo.unfiltered()
47
47
48 revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
48 revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
49 specentries = (
49 specentries = (
50 (upgrade_engine.UPGRADE_CHANGELOG, changelog),
50 (upgrade_engine.UPGRADE_CHANGELOG, changelog),
51 (upgrade_engine.UPGRADE_MANIFEST, manifest),
51 (upgrade_engine.UPGRADE_MANIFEST, manifest),
52 (upgrade_engine.UPGRADE_FILELOGS, filelogs),
52 (upgrade_engine.UPGRADE_FILELOGS, filelogs),
53 )
53 )
54 specified = [(y, x) for (y, x) in specentries if x is not None]
54 specified = [(y, x) for (y, x) in specentries if x is not None]
55 if specified:
55 if specified:
56 # we have some limitation on revlogs to be recloned
56 # we have some limitation on revlogs to be recloned
57 if any(x for y, x in specified):
57 if any(x for y, x in specified):
58 revlogs = set()
58 revlogs = set()
59 for upgrade, enabled in specified:
59 for upgrade, enabled in specified:
60 if enabled:
60 if enabled:
61 revlogs.add(upgrade)
61 revlogs.add(upgrade)
62 else:
62 else:
63 # none are enabled
63 # none are enabled
64 for upgrade, __ in specified:
64 for upgrade, __ in specified:
65 revlogs.discard(upgrade)
65 revlogs.discard(upgrade)
66
66
67 # Ensure the repository can be upgraded.
67 # Ensure the repository can be upgraded.
68 upgrade_actions.check_source_requirements(repo)
68 upgrade_actions.check_source_requirements(repo)
69
69
70 default_options = localrepo.defaultcreateopts(repo.ui)
70 default_options = localrepo.defaultcreateopts(repo.ui)
71 newreqs = localrepo.newreporequirements(repo.ui, default_options)
71 newreqs = localrepo.newreporequirements(repo.ui, default_options)
72 newreqs.update(upgrade_actions.preservedrequirements(repo))
72 newreqs.update(upgrade_actions.preservedrequirements(repo))
73
73
74 upgrade_actions.check_requirements_changes(repo, newreqs)
74 upgrade_actions.check_requirements_changes(repo, newreqs)
75
75
76 # Find and validate all improvements that can be made.
76 # Find and validate all improvements that can be made.
77 alloptimizations = upgrade_actions.findoptimizations(repo)
77 alloptimizations = upgrade_actions.findoptimizations(repo)
78
78
79 # Apply and Validate arguments.
79 # Apply and Validate arguments.
80 optimizations = []
80 optimizations = []
81 for o in alloptimizations:
81 for o in alloptimizations:
82 if o.name in optimize:
82 if o.name in optimize:
83 optimizations.append(o)
83 optimizations.append(o)
84 optimize.discard(o.name)
84 optimize.discard(o.name)
85
85
86 if optimize: # anything left is unknown
86 if optimize: # anything left is unknown
87 raise error.Abort(
87 raise error.Abort(
88 _(b'unknown optimization action requested: %s')
88 _(b'unknown optimization action requested: %s')
89 % b', '.join(sorted(optimize)),
89 % b', '.join(sorted(optimize)),
90 hint=_(b'run without arguments to see valid optimizations'),
90 hint=_(b'run without arguments to see valid optimizations'),
91 )
91 )
92
92
93 format_upgrades = upgrade_actions.find_format_upgrades(repo)
93 format_upgrades = upgrade_actions.find_format_upgrades(repo)
94 up_actions = upgrade_actions.determine_upgrade_actions(
94 up_actions = upgrade_actions.determine_upgrade_actions(
95 repo, format_upgrades, optimizations, repo.requirements, newreqs
95 repo, format_upgrades, optimizations, repo.requirements, newreqs
96 )
96 )
97 removed_actions = upgrade_actions.find_format_downgrades(repo)
97 removed_actions = upgrade_actions.find_format_downgrades(repo)
98
98
99 removedreqs = repo.requirements - newreqs
99 removedreqs = repo.requirements - newreqs
100 addedreqs = newreqs - repo.requirements
100 addedreqs = newreqs - repo.requirements
101
101
102 if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS:
102 if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS:
103 incompatible = upgrade_actions.RECLONES_REQUIREMENTS & (
103 incompatible = upgrade_actions.RECLONES_REQUIREMENTS & (
104 removedreqs | addedreqs
104 removedreqs | addedreqs
105 )
105 )
106 if incompatible:
106 if incompatible:
107 msg = _(
107 msg = _(
108 b'ignoring revlogs selection flags, format requirements '
108 b'ignoring revlogs selection flags, format requirements '
109 b'change: %s\n'
109 b'change: %s\n'
110 )
110 )
111 ui.warn(msg % b', '.join(sorted(incompatible)))
111 ui.warn(msg % b', '.join(sorted(incompatible)))
112 revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS
112 revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS
113
113
114 upgrade_op = upgrade_actions.UpgradeOperation(
114 upgrade_op = upgrade_actions.UpgradeOperation(
115 ui,
115 ui,
116 newreqs,
116 newreqs,
117 repo.requirements,
117 repo.requirements,
118 up_actions,
118 up_actions,
119 removed_actions,
119 removed_actions,
120 revlogs,
120 revlogs,
121 )
121 )
122
122
123 if not run:
123 if not run:
124 fromconfig = []
124 fromconfig = []
125 onlydefault = []
125 onlydefault = []
126
126
127 for d in format_upgrades:
127 for d in format_upgrades:
128 if d.fromconfig(repo):
128 if d.fromconfig(repo):
129 fromconfig.append(d)
129 fromconfig.append(d)
130 elif d.default:
130 elif d.default:
131 onlydefault.append(d)
131 onlydefault.append(d)
132
132
133 if fromconfig or onlydefault:
133 if fromconfig or onlydefault:
134
134
135 if fromconfig:
135 if fromconfig:
136 ui.status(
136 ui.status(
137 _(
137 _(
138 b'repository lacks features recommended by '
138 b'repository lacks features recommended by '
139 b'current config options:\n\n'
139 b'current config options:\n\n'
140 )
140 )
141 )
141 )
142 for i in fromconfig:
142 for i in fromconfig:
143 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
143 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
144
144
145 if onlydefault:
145 if onlydefault:
146 ui.status(
146 ui.status(
147 _(
147 _(
148 b'repository lacks features used by the default '
148 b'repository lacks features used by the default '
149 b'config options:\n\n'
149 b'config options:\n\n'
150 )
150 )
151 )
151 )
152 for i in onlydefault:
152 for i in onlydefault:
153 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
153 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
154
154
155 ui.status(b'\n')
155 ui.status(b'\n')
156 else:
156 else:
157 ui.status(_(b'(no format upgrades found in existing repository)\n'))
157 ui.status(_(b'(no format upgrades found in existing repository)\n'))
158
158
159 ui.status(
159 ui.status(
160 _(
160 _(
161 b'performing an upgrade with "--run" will make the following '
161 b'performing an upgrade with "--run" will make the following '
162 b'changes:\n\n'
162 b'changes:\n\n'
163 )
163 )
164 )
164 )
165
165
166 upgrade_op.print_requirements()
166 upgrade_op.print_requirements()
167 upgrade_op.print_optimisations()
167 upgrade_op.print_optimisations()
168 upgrade_op.print_upgrade_actions()
168 upgrade_op.print_upgrade_actions()
169 upgrade_op.print_affected_revlogs()
169 upgrade_op.print_affected_revlogs()
170
170
171 if upgrade_op.unused_optimizations:
171 if upgrade_op.unused_optimizations:
172 ui.status(
172 ui.status(
173 _(
173 _(
174 b'additional optimizations are available by specifying '
174 b'additional optimizations are available by specifying '
175 b'"--optimize <name>":\n\n'
175 b'"--optimize <name>":\n\n'
176 )
176 )
177 )
177 )
178 upgrade_op.print_unused_optimizations()
178 upgrade_op.print_unused_optimizations()
179 return
179 return
180
180
181 if not (upgrade_op.upgrade_actions or upgrade_op.removed_actions):
181 if not (upgrade_op.upgrade_actions or upgrade_op.removed_actions):
182 ui.status(_(b'nothing to do\n'))
182 ui.status(_(b'nothing to do\n'))
183 return
183 return
184 # Else we're in the run=true case.
184 # Else we're in the run=true case.
185 ui.write(_(b'upgrade will perform the following actions:\n\n'))
185 ui.write(_(b'upgrade will perform the following actions:\n\n'))
186 upgrade_op.print_requirements()
186 upgrade_op.print_requirements()
187 upgrade_op.print_optimisations()
187 upgrade_op.print_optimisations()
188 upgrade_op.print_upgrade_actions()
188 upgrade_op.print_upgrade_actions()
189 upgrade_op.print_affected_revlogs()
189 upgrade_op.print_affected_revlogs()
190
190
191 ui.status(_(b'beginning upgrade...\n'))
191 ui.status(_(b'beginning upgrade...\n'))
192 with repo.wlock(), repo.lock():
192 with repo.wlock(), repo.lock():
193 ui.status(_(b'repository locked and read-only\n'))
193 ui.status(_(b'repository locked and read-only\n'))
194 # Our strategy for upgrading the repository is to create a new,
194 # Our strategy for upgrading the repository is to create a new,
195 # temporary repository, write data to it, then do a swap of the
195 # temporary repository, write data to it, then do a swap of the
196 # data. There are less heavyweight ways to do this, but it is easier
196 # data. There are less heavyweight ways to do this, but it is easier
197 # to create a new repo object than to instantiate all the components
197 # to create a new repo object than to instantiate all the components
198 # (like the store) separately.
198 # (like the store) separately.
199 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
199 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
200 backuppath = None
200 backuppath = None
201 try:
201 try:
202 ui.status(
202 ui.status(
203 _(
203 _(
204 b'creating temporary repository to stage upgraded '
204 b'creating temporary repository to stage upgraded '
205 b'data: %s\n'
205 b'data: %s\n'
206 )
206 )
207 % tmppath
207 % tmppath
208 )
208 )
209
209
210 # clone ui without using ui.copy because repo.ui is protected
210 # clone ui without using ui.copy because repo.ui is protected
211 repoui = repo.ui.__class__(repo.ui)
211 repoui = repo.ui.__class__(repo.ui)
212 dstrepo = hg.repository(repoui, path=tmppath, create=True)
212 dstrepo = hg.repository(repoui, path=tmppath, create=True)
213
213
214 with dstrepo.wlock(), dstrepo.lock():
214 with dstrepo.wlock(), dstrepo.lock():
215 backuppath = upgrade_engine.upgrade(
215 backuppath = upgrade_engine.upgrade(
216 ui, repo, dstrepo, upgrade_op
216 ui, repo, dstrepo, upgrade_op
217 )
217 )
218 if not backup:
218 if not backup:
219 ui.status(
219 ui.status(
220 _(b'removing old repository content %s\n') % backuppath
220 _(b'removing old repository content %s\n') % backuppath
221 )
221 )
222 repo.vfs.rmtree(backuppath, forcibly=True)
222 repo.vfs.rmtree(backuppath, forcibly=True)
223 backuppath = None
223 backuppath = None
224
224
225 finally:
225 finally:
226 ui.status(_(b'removing temporary repository %s\n') % tmppath)
226 ui.status(_(b'removing temporary repository %s\n') % tmppath)
227 repo.vfs.rmtree(tmppath, forcibly=True)
227 repo.vfs.rmtree(tmppath, forcibly=True)
228
228
229 if backuppath and not ui.quiet:
229 if backuppath and not ui.quiet:
230 ui.warn(
230 ui.warn(
231 _(b'copy of old repository backed up at %s\n') % backuppath
231 _(b'copy of old repository backed up at %s\n') % backuppath
232 )
232 )
233 ui.warn(
233 ui.warn(
234 _(
234 _(
235 b'the old repository will not be deleted; remove '
235 b'the old repository will not be deleted; remove '
236 b'it to free up disk space once the upgraded '
236 b'it to free up disk space once the upgraded '
237 b'repository is verified\n'
237 b'repository is verified\n'
238 )
238 )
239 )
239 )
240
240
241 upgrade_op.print_post_op_messages()
241 upgrade_op.print_post_op_messages()
242
242
243
243
244 def upgrade_share_to_safe(ui, hgvfs, storevfs, current_requirements):
244 def upgrade_share_to_safe(ui, hgvfs, storevfs, current_requirements):
245 """Upgrades a share to use share-safe mechanism"""
245 """Upgrades a share to use share-safe mechanism"""
246 wlock = None
246 wlock = None
247 try:
247 try:
248 wlock = lockmod.trylock(ui, hgvfs, b'wlock', 0, 0)
248 wlock = lockmod.trylock(ui, hgvfs, b'wlock', 0, 0)
249 store_requirements = localrepo._readrequires(storevfs, False)
249 store_requirements = localrepo._readrequires(storevfs, False)
250 # after upgrade, store requires will be shared, so lets find
250 # after upgrade, store requires will be shared, so lets find
251 # the requirements which are not present in store and
251 # the requirements which are not present in store and
252 # write them to share's .hg/requires
252 # write them to share's .hg/requires
253 diffrequires = current_requirements - store_requirements
253 diffrequires = current_requirements - store_requirements
254 # add share-safe requirement as it will mark the share as share-safe
254 # add share-safe requirement as it will mark the share as share-safe
255 diffrequires.add(requirementsmod.SHARESAFE_REQUIREMENT)
255 diffrequires.add(requirementsmod.SHARESAFE_REQUIREMENT)
256 scmutil.writerequires(hgvfs, diffrequires)
256 scmutil.writerequires(hgvfs, diffrequires)
257 current_requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
257 current_requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
258 ui.warn(_(b'repository upgraded to use share-safe mode\n'))
258 ui.warn(_(b'repository upgraded to use share-safe mode\n'))
259 except error.LockError as e:
259 except error.LockError as e:
260 ui.warn(
260 ui.warn(
261 _(b'failed to upgrade share, got error: %s\n')
261 _(b'failed to upgrade share, got error: %s\n')
262 % stringutil.forcebytestr(e.strerror)
262 % stringutil.forcebytestr(e.strerror)
263 )
263 )
264 finally:
264 finally:
265 if wlock:
265 if wlock:
266 wlock.release()
266 wlock.release()
267
268
269 def downgrade_share_to_non_safe(
270 ui,
271 hgvfs,
272 sharedvfs,
273 current_requirements,
274 ):
275 """Downgrades a share which use share-safe to not use it"""
276 wlock = None
277 try:
278 wlock = lockmod.trylock(ui, hgvfs, b'wlock', 0, 0)
279 source_requirements = localrepo._readrequires(sharedvfs, True)
280 # we cannot be 100% sure on which requirements were present in store when
281 # the source supported share-safe. However, we do know that working
282 # directory requirements were not there. Hence we remove them
283 source_requirements -= requirementsmod.WORKING_DIR_REQUIREMENTS
284 current_requirements |= source_requirements
285 current_requirements.remove(requirementsmod.SHARESAFE_REQUIREMENT)
286 scmutil.writerequires(hgvfs, current_requirements)
287 ui.warn(_(b'repository downgraded to not use share-safe mode\n'))
288 except error.LockError as e:
289 # raise error right away because if downgrade failed, we cannot load
290 # the repository because it does not have complete set of requirements
291 raise error.Abort(
292 _(b'failed to downgrade share, got error: %s')
293 % stringutil.forcebytestr(e.strerror)
294 )
295 finally:
296 if wlock:
297 wlock.release()
@@ -1,541 +1,562 b''
1 setup
1 setup
2
2
3 $ cat >> $HGRCPATH <<EOF
3 $ cat >> $HGRCPATH <<EOF
4 > [extensions]
4 > [extensions]
5 > share =
5 > share =
6 > [format]
6 > [format]
7 > exp-share-safe = True
7 > exp-share-safe = True
8 > EOF
8 > EOF
9
9
10 prepare source repo
10 prepare source repo
11
11
12 $ hg init source
12 $ hg init source
13 $ cd source
13 $ cd source
14 $ cat .hg/requires
14 $ cat .hg/requires
15 exp-sharesafe
15 exp-sharesafe
16 $ cat .hg/store/requires
16 $ cat .hg/store/requires
17 dotencode
17 dotencode
18 fncache
18 fncache
19 generaldelta
19 generaldelta
20 revlogv1
20 revlogv1
21 sparserevlog
21 sparserevlog
22 store
22 store
23 $ hg debugrequirements
23 $ hg debugrequirements
24 dotencode
24 dotencode
25 exp-sharesafe
25 exp-sharesafe
26 fncache
26 fncache
27 generaldelta
27 generaldelta
28 revlogv1
28 revlogv1
29 sparserevlog
29 sparserevlog
30 store
30 store
31
31
32 $ echo a > a
32 $ echo a > a
33 $ hg ci -Aqm "added a"
33 $ hg ci -Aqm "added a"
34 $ echo b > b
34 $ echo b > b
35 $ hg ci -Aqm "added b"
35 $ hg ci -Aqm "added b"
36
36
37 $ HGEDITOR=cat hg config --shared
37 $ HGEDITOR=cat hg config --shared
38 abort: repository is not shared; can't use --shared
38 abort: repository is not shared; can't use --shared
39 [10]
39 [10]
40 $ cd ..
40 $ cd ..
41
41
42 Create a shared repo and check the requirements are shared and read correctly
42 Create a shared repo and check the requirements are shared and read correctly
43 $ hg share source shared1
43 $ hg share source shared1
44 updating working directory
44 updating working directory
45 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
45 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
46 $ cd shared1
46 $ cd shared1
47 $ cat .hg/requires
47 $ cat .hg/requires
48 exp-sharesafe
48 exp-sharesafe
49 shared
49 shared
50
50
51 $ hg debugrequirements -R ../source
51 $ hg debugrequirements -R ../source
52 dotencode
52 dotencode
53 exp-sharesafe
53 exp-sharesafe
54 fncache
54 fncache
55 generaldelta
55 generaldelta
56 revlogv1
56 revlogv1
57 sparserevlog
57 sparserevlog
58 store
58 store
59
59
60 $ hg debugrequirements
60 $ hg debugrequirements
61 dotencode
61 dotencode
62 exp-sharesafe
62 exp-sharesafe
63 fncache
63 fncache
64 generaldelta
64 generaldelta
65 revlogv1
65 revlogv1
66 shared
66 shared
67 sparserevlog
67 sparserevlog
68 store
68 store
69
69
70 $ echo c > c
70 $ echo c > c
71 $ hg ci -Aqm "added c"
71 $ hg ci -Aqm "added c"
72
72
73 Check that config of the source repository is also loaded
73 Check that config of the source repository is also loaded
74
74
75 $ hg showconfig ui.curses
75 $ hg showconfig ui.curses
76 [1]
76 [1]
77
77
78 $ echo "[ui]" >> ../source/.hg/hgrc
78 $ echo "[ui]" >> ../source/.hg/hgrc
79 $ echo "curses=true" >> ../source/.hg/hgrc
79 $ echo "curses=true" >> ../source/.hg/hgrc
80
80
81 $ hg showconfig ui.curses
81 $ hg showconfig ui.curses
82 true
82 true
83
83
84 Test that extensions of source repository are also loaded
84 Test that extensions of source repository are also loaded
85
85
86 $ hg debugextensions
86 $ hg debugextensions
87 share
87 share
88 $ hg extdiff -p echo
88 $ hg extdiff -p echo
89 hg: unknown command 'extdiff'
89 hg: unknown command 'extdiff'
90 'extdiff' is provided by the following extension:
90 'extdiff' is provided by the following extension:
91
91
92 extdiff command to allow external programs to compare revisions
92 extdiff command to allow external programs to compare revisions
93
93
94 (use 'hg help extensions' for information on enabling extensions)
94 (use 'hg help extensions' for information on enabling extensions)
95 [255]
95 [255]
96
96
97 $ echo "[extensions]" >> ../source/.hg/hgrc
97 $ echo "[extensions]" >> ../source/.hg/hgrc
98 $ echo "extdiff=" >> ../source/.hg/hgrc
98 $ echo "extdiff=" >> ../source/.hg/hgrc
99
99
100 $ hg debugextensions -R ../source
100 $ hg debugextensions -R ../source
101 extdiff
101 extdiff
102 share
102 share
103 $ hg extdiff -R ../source -p echo
103 $ hg extdiff -R ../source -p echo
104
104
105 BROKEN: the command below will not work if config of shared source is not loaded
105 BROKEN: the command below will not work if config of shared source is not loaded
106 on dispatch but debugextensions says that extension
106 on dispatch but debugextensions says that extension
107 is loaded
107 is loaded
108 $ hg debugextensions
108 $ hg debugextensions
109 extdiff
109 extdiff
110 share
110 share
111
111
112 $ hg extdiff -p echo
112 $ hg extdiff -p echo
113
113
114 However, local .hg/hgrc should override the config set by share source
114 However, local .hg/hgrc should override the config set by share source
115
115
116 $ echo "[ui]" >> .hg/hgrc
116 $ echo "[ui]" >> .hg/hgrc
117 $ echo "curses=false" >> .hg/hgrc
117 $ echo "curses=false" >> .hg/hgrc
118
118
119 $ hg showconfig ui.curses
119 $ hg showconfig ui.curses
120 false
120 false
121
121
122 $ HGEDITOR=cat hg config --shared
122 $ HGEDITOR=cat hg config --shared
123 [ui]
123 [ui]
124 curses=true
124 curses=true
125 [extensions]
125 [extensions]
126 extdiff=
126 extdiff=
127
127
128 $ HGEDITOR=cat hg config --local
128 $ HGEDITOR=cat hg config --local
129 [ui]
129 [ui]
130 curses=false
130 curses=false
131
131
132 Testing that hooks set in source repository also run in shared repo
132 Testing that hooks set in source repository also run in shared repo
133
133
134 $ cd ../source
134 $ cd ../source
135 $ cat <<EOF >> .hg/hgrc
135 $ cat <<EOF >> .hg/hgrc
136 > [extensions]
136 > [extensions]
137 > hooklib=
137 > hooklib=
138 > [hooks]
138 > [hooks]
139 > pretxnchangegroup.reject_merge_commits = \
139 > pretxnchangegroup.reject_merge_commits = \
140 > python:hgext.hooklib.reject_merge_commits.hook
140 > python:hgext.hooklib.reject_merge_commits.hook
141 > EOF
141 > EOF
142
142
143 $ cd ..
143 $ cd ..
144 $ hg clone source cloned
144 $ hg clone source cloned
145 updating to branch default
145 updating to branch default
146 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
146 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
147 $ cd cloned
147 $ cd cloned
148 $ hg up 0
148 $ hg up 0
149 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
149 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
150 $ echo bar > bar
150 $ echo bar > bar
151 $ hg ci -Aqm "added bar"
151 $ hg ci -Aqm "added bar"
152 $ hg merge
152 $ hg merge
153 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
153 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
154 (branch merge, don't forget to commit)
154 (branch merge, don't forget to commit)
155 $ hg ci -m "merge commit"
155 $ hg ci -m "merge commit"
156
156
157 $ hg push ../source
157 $ hg push ../source
158 pushing to ../source
158 pushing to ../source
159 searching for changes
159 searching for changes
160 adding changesets
160 adding changesets
161 adding manifests
161 adding manifests
162 adding file changes
162 adding file changes
163 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
163 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
164 transaction abort!
164 transaction abort!
165 rollback completed
165 rollback completed
166 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
166 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
167 [255]
167 [255]
168
168
169 $ hg push ../shared1
169 $ hg push ../shared1
170 pushing to ../shared1
170 pushing to ../shared1
171 searching for changes
171 searching for changes
172 adding changesets
172 adding changesets
173 adding manifests
173 adding manifests
174 adding file changes
174 adding file changes
175 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
175 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
176 transaction abort!
176 transaction abort!
177 rollback completed
177 rollback completed
178 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
178 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
179 [255]
179 [255]
180
180
181 Test that if share source config is untrusted, we don't read it
181 Test that if share source config is untrusted, we don't read it
182
182
183 $ cd ../shared1
183 $ cd ../shared1
184
184
185 $ cat << EOF > $TESTTMP/untrusted.py
185 $ cat << EOF > $TESTTMP/untrusted.py
186 > from mercurial import scmutil, util
186 > from mercurial import scmutil, util
187 > def uisetup(ui):
187 > def uisetup(ui):
188 > class untrustedui(ui.__class__):
188 > class untrustedui(ui.__class__):
189 > def _trusted(self, fp, f):
189 > def _trusted(self, fp, f):
190 > if util.normpath(fp.name).endswith(b'source/.hg/hgrc'):
190 > if util.normpath(fp.name).endswith(b'source/.hg/hgrc'):
191 > return False
191 > return False
192 > return super(untrustedui, self)._trusted(fp, f)
192 > return super(untrustedui, self)._trusted(fp, f)
193 > ui.__class__ = untrustedui
193 > ui.__class__ = untrustedui
194 > EOF
194 > EOF
195
195
196 $ hg showconfig hooks
196 $ hg showconfig hooks
197 hooks.pretxnchangegroup.reject_merge_commits=python:hgext.hooklib.reject_merge_commits.hook
197 hooks.pretxnchangegroup.reject_merge_commits=python:hgext.hooklib.reject_merge_commits.hook
198
198
199 $ hg showconfig hooks --config extensions.untrusted=$TESTTMP/untrusted.py
199 $ hg showconfig hooks --config extensions.untrusted=$TESTTMP/untrusted.py
200 [1]
200 [1]
201
201
202 Update the source repository format and check that shared repo works
202 Update the source repository format and check that shared repo works
203
203
204 $ cd ../source
204 $ cd ../source
205
205
206 Disable zstd related tests because its not present on pure version
206 Disable zstd related tests because its not present on pure version
207 #if zstd
207 #if zstd
208 $ echo "[format]" >> .hg/hgrc
208 $ echo "[format]" >> .hg/hgrc
209 $ echo "revlog-compression=zstd" >> .hg/hgrc
209 $ echo "revlog-compression=zstd" >> .hg/hgrc
210
210
211 $ hg debugupgraderepo --run -q
211 $ hg debugupgraderepo --run -q
212 upgrade will perform the following actions:
212 upgrade will perform the following actions:
213
213
214 requirements
214 requirements
215 preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store
215 preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store
216 added: revlog-compression-zstd
216 added: revlog-compression-zstd
217
217
218 processed revlogs:
218 processed revlogs:
219 - all-filelogs
219 - all-filelogs
220 - changelog
220 - changelog
221 - manifest
221 - manifest
222
222
223 $ hg log -r .
223 $ hg log -r .
224 changeset: 1:5f6d8a4bf34a
224 changeset: 1:5f6d8a4bf34a
225 user: test
225 user: test
226 date: Thu Jan 01 00:00:00 1970 +0000
226 date: Thu Jan 01 00:00:00 1970 +0000
227 summary: added b
227 summary: added b
228
228
229 #endif
229 #endif
230 $ echo "[format]" >> .hg/hgrc
230 $ echo "[format]" >> .hg/hgrc
231 $ echo "use-persistent-nodemap=True" >> .hg/hgrc
231 $ echo "use-persistent-nodemap=True" >> .hg/hgrc
232
232
233 $ hg debugupgraderepo --run -q -R ../shared1
233 $ hg debugupgraderepo --run -q -R ../shared1
234 abort: cannot upgrade repository; unsupported source requirement: shared
234 abort: cannot upgrade repository; unsupported source requirement: shared
235 [255]
235 [255]
236
236
237 $ hg debugupgraderepo --run -q
237 $ hg debugupgraderepo --run -q
238 upgrade will perform the following actions:
238 upgrade will perform the following actions:
239
239
240 requirements
240 requirements
241 preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
241 preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
242 preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
242 preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
243 added: persistent-nodemap
243 added: persistent-nodemap
244
244
245 processed revlogs:
245 processed revlogs:
246 - all-filelogs
246 - all-filelogs
247 - changelog
247 - changelog
248 - manifest
248 - manifest
249
249
250 $ hg log -r .
250 $ hg log -r .
251 changeset: 1:5f6d8a4bf34a
251 changeset: 1:5f6d8a4bf34a
252 user: test
252 user: test
253 date: Thu Jan 01 00:00:00 1970 +0000
253 date: Thu Jan 01 00:00:00 1970 +0000
254 summary: added b
254 summary: added b
255
255
256
256
257 Shared one should work
257 Shared one should work
258 $ cd ../shared1
258 $ cd ../shared1
259 $ hg log -r .
259 $ hg log -r .
260 changeset: 2:155349b645be
260 changeset: 2:155349b645be
261 tag: tip
261 tag: tip
262 user: test
262 user: test
263 date: Thu Jan 01 00:00:00 1970 +0000
263 date: Thu Jan 01 00:00:00 1970 +0000
264 summary: added c
264 summary: added c
265
265
266
266
267 Testing that nonsharedrc is loaded for source and not shared
267 Testing that nonsharedrc is loaded for source and not shared
268
268
269 $ cd ../source
269 $ cd ../source
270 $ touch .hg/hgrc-not-shared
270 $ touch .hg/hgrc-not-shared
271 $ echo "[ui]" >> .hg/hgrc-not-shared
271 $ echo "[ui]" >> .hg/hgrc-not-shared
272 $ echo "traceback=true" >> .hg/hgrc-not-shared
272 $ echo "traceback=true" >> .hg/hgrc-not-shared
273
273
274 $ hg showconfig ui.traceback
274 $ hg showconfig ui.traceback
275 true
275 true
276
276
277 $ HGEDITOR=cat hg config --non-shared
277 $ HGEDITOR=cat hg config --non-shared
278 [ui]
278 [ui]
279 traceback=true
279 traceback=true
280
280
281 $ cd ../shared1
281 $ cd ../shared1
282 $ hg showconfig ui.traceback
282 $ hg showconfig ui.traceback
283 [1]
283 [1]
284
284
285 Unsharing works
285 Unsharing works
286
286
287 $ hg unshare
287 $ hg unshare
288
288
289 Test that source config is added to the shared one after unshare, and the config
289 Test that source config is added to the shared one after unshare, and the config
290 of current repo is still respected over the config which came from source config
290 of current repo is still respected over the config which came from source config
291 $ cd ../cloned
291 $ cd ../cloned
292 $ hg push ../shared1
292 $ hg push ../shared1
293 pushing to ../shared1
293 pushing to ../shared1
294 searching for changes
294 searching for changes
295 adding changesets
295 adding changesets
296 adding manifests
296 adding manifests
297 adding file changes
297 adding file changes
298 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
298 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
299 transaction abort!
299 transaction abort!
300 rollback completed
300 rollback completed
301 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
301 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
302 [255]
302 [255]
303 $ hg showconfig ui.curses -R ../shared1
303 $ hg showconfig ui.curses -R ../shared1
304 false
304 false
305
305
306 $ cd ../
306 $ cd ../
307
307
308 Test that upgrading using debugupgraderepo works
308 Test that upgrading using debugupgraderepo works
309 =================================================
309 =================================================
310
310
311 $ hg init non-share-safe --config format.exp-share-safe=false
311 $ hg init non-share-safe --config format.exp-share-safe=false
312 $ cd non-share-safe
312 $ cd non-share-safe
313 $ hg debugrequirements
313 $ hg debugrequirements
314 dotencode
314 dotencode
315 fncache
315 fncache
316 generaldelta
316 generaldelta
317 revlogv1
317 revlogv1
318 sparserevlog
318 sparserevlog
319 store
319 store
320 $ echo foo > foo
320 $ echo foo > foo
321 $ hg ci -Aqm 'added foo'
321 $ hg ci -Aqm 'added foo'
322 $ echo bar > bar
322 $ echo bar > bar
323 $ hg ci -Aqm 'added bar'
323 $ hg ci -Aqm 'added bar'
324
324
325 Create a share before upgrading
325 Create a share before upgrading
326
326
327 $ cd ..
327 $ cd ..
328 $ hg share non-share-safe nss-share
328 $ hg share non-share-safe nss-share
329 updating working directory
329 updating working directory
330 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
330 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
331 $ hg debugrequirements -R nss-share
331 $ hg debugrequirements -R nss-share
332 dotencode
332 dotencode
333 fncache
333 fncache
334 generaldelta
334 generaldelta
335 revlogv1
335 revlogv1
336 shared
336 shared
337 sparserevlog
337 sparserevlog
338 store
338 store
339 $ cd non-share-safe
339 $ cd non-share-safe
340
340
341 Upgrade
341 Upgrade
342
342
343 $ hg debugupgraderepo -q
343 $ hg debugupgraderepo -q
344 requirements
344 requirements
345 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
345 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
346 added: exp-sharesafe
346 added: exp-sharesafe
347
347
348 processed revlogs:
348 processed revlogs:
349 - all-filelogs
349 - all-filelogs
350 - changelog
350 - changelog
351 - manifest
351 - manifest
352
352
353 $ hg debugupgraderepo --run -q
353 $ hg debugupgraderepo --run -q
354 upgrade will perform the following actions:
354 upgrade will perform the following actions:
355
355
356 requirements
356 requirements
357 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
357 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
358 added: exp-sharesafe
358 added: exp-sharesafe
359
359
360 processed revlogs:
360 processed revlogs:
361 - all-filelogs
361 - all-filelogs
362 - changelog
362 - changelog
363 - manifest
363 - manifest
364
364
365 repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
365 repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
366
366
367 $ hg debugrequirements
367 $ hg debugrequirements
368 dotencode
368 dotencode
369 exp-sharesafe
369 exp-sharesafe
370 fncache
370 fncache
371 generaldelta
371 generaldelta
372 revlogv1
372 revlogv1
373 sparserevlog
373 sparserevlog
374 store
374 store
375
375
376 $ cat .hg/requires
376 $ cat .hg/requires
377 exp-sharesafe
377 exp-sharesafe
378
378
379 $ cat .hg/store/requires
379 $ cat .hg/store/requires
380 dotencode
380 dotencode
381 fncache
381 fncache
382 generaldelta
382 generaldelta
383 revlogv1
383 revlogv1
384 sparserevlog
384 sparserevlog
385 store
385 store
386
386
387 $ hg log -GT "{node}: {desc}\n"
387 $ hg log -GT "{node}: {desc}\n"
388 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
388 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
389 |
389 |
390 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
390 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
391
391
392
392
393 Make sure existing shares still work
393 Make sure existing shares still work
394
394
395 $ hg log -GT "{node}: {desc}\n" -R ../nss-share
395 $ hg log -GT "{node}: {desc}\n" -R ../nss-share
396 warning: source repository supports share-safe functionality. Reshare to upgrade.
396 warning: source repository supports share-safe functionality. Reshare to upgrade.
397 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
397 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
398 |
398 |
399 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
399 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
400
400
401
401
402
402
403 Create a safe share from upgrade one
403 Create a safe share from upgrade one
404
404
405 $ cd ..
405 $ cd ..
406 $ hg share non-share-safe ss-share
406 $ hg share non-share-safe ss-share
407 updating working directory
407 updating working directory
408 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
408 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
409 $ cd ss-share
409 $ cd ss-share
410 $ hg log -GT "{node}: {desc}\n"
410 $ hg log -GT "{node}: {desc}\n"
411 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
411 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
412 |
412 |
413 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
413 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
414
414
415 $ cd ../non-share-safe
415 $ cd ../non-share-safe
416
416
417 Test that downgrading works too
417 Test that downgrading works too
418
418
419 $ cat >> $HGRCPATH <<EOF
419 $ cat >> $HGRCPATH <<EOF
420 > [extensions]
420 > [extensions]
421 > share =
421 > share =
422 > [format]
422 > [format]
423 > exp-share-safe = False
423 > exp-share-safe = False
424 > EOF
424 > EOF
425
425
426 $ hg debugupgraderepo -q
426 $ hg debugupgraderepo -q
427 requirements
427 requirements
428 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
428 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
429 removed: exp-sharesafe
429 removed: exp-sharesafe
430
430
431 processed revlogs:
431 processed revlogs:
432 - all-filelogs
432 - all-filelogs
433 - changelog
433 - changelog
434 - manifest
434 - manifest
435
435
436 $ hg debugupgraderepo -q --run
436 $ hg debugupgraderepo -q --run
437 upgrade will perform the following actions:
437 upgrade will perform the following actions:
438
438
439 requirements
439 requirements
440 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
440 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
441 removed: exp-sharesafe
441 removed: exp-sharesafe
442
442
443 processed revlogs:
443 processed revlogs:
444 - all-filelogs
444 - all-filelogs
445 - changelog
445 - changelog
446 - manifest
446 - manifest
447
447
448 repository downgraded to not use share safe mode, existing shares will not work and needs to be reshared.
448 repository downgraded to not use share safe mode, existing shares will not work and needs to be reshared.
449
449
450 $ hg debugrequirements
450 $ hg debugrequirements
451 dotencode
451 dotencode
452 fncache
452 fncache
453 generaldelta
453 generaldelta
454 revlogv1
454 revlogv1
455 sparserevlog
455 sparserevlog
456 store
456 store
457
457
458 $ cat .hg/requires
458 $ cat .hg/requires
459 dotencode
459 dotencode
460 fncache
460 fncache
461 generaldelta
461 generaldelta
462 revlogv1
462 revlogv1
463 sparserevlog
463 sparserevlog
464 store
464 store
465
465
466 $ test -f .hg/store/requires
466 $ test -f .hg/store/requires
467 [1]
467 [1]
468
468
469 $ hg log -GT "{node}: {desc}\n"
469 $ hg log -GT "{node}: {desc}\n"
470 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
470 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
471 |
471 |
472 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
472 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
473
473
474
474
475 Make sure existing shares still work
475 Make sure existing shares still work
476
476
477 $ hg log -GT "{node}: {desc}\n" -R ../nss-share
477 $ hg log -GT "{node}: {desc}\n" -R ../nss-share
478 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
478 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
479 |
479 |
480 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
480 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
481
481
482
482
483 $ hg log -GT "{node}: {desc}\n" -R ../ss-share
483 $ hg log -GT "{node}: {desc}\n" -R ../ss-share
484 abort: share source does not support exp-sharesafe requirement
484 abort: share source does not support exp-sharesafe requirement
485 [255]
485 [255]
486
486
487 Testing automatic downgrade of shares when config is set
488
489 $ touch ../ss-share/.hg/wlock
490 $ hg log -GT "{node}: {desc}\n" -R ../ss-share --config experimental.sharesafe-auto-downgrade-shares=true
491 abort: failed to downgrade share, got error: Lock held
492 [255]
493 $ rm ../ss-share/.hg/wlock
494
495 $ hg log -GT "{node}: {desc}\n" -R ../ss-share --config experimental.sharesafe-auto-downgrade-shares=true
496 repository downgraded to not use share-safe mode
497 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
498 |
499 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
500
501
502 $ hg log -GT "{node}: {desc}\n" -R ../ss-share
503 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
504 |
505 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
506
507
487
508
488 Testing automatic upgrade of shares when config is set
509 Testing automatic upgrade of shares when config is set
489
510
490 $ hg debugupgraderepo -q --run --config format.exp-share-safe=True
511 $ hg debugupgraderepo -q --run --config format.exp-share-safe=True
491 upgrade will perform the following actions:
512 upgrade will perform the following actions:
492
513
493 requirements
514 requirements
494 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
515 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
495 added: exp-sharesafe
516 added: exp-sharesafe
496
517
497 processed revlogs:
518 processed revlogs:
498 - all-filelogs
519 - all-filelogs
499 - changelog
520 - changelog
500 - manifest
521 - manifest
501
522
502 repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
523 repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
503 $ hg debugrequirements
524 $ hg debugrequirements
504 dotencode
525 dotencode
505 exp-sharesafe
526 exp-sharesafe
506 fncache
527 fncache
507 generaldelta
528 generaldelta
508 revlogv1
529 revlogv1
509 sparserevlog
530 sparserevlog
510 store
531 store
511 $ hg log -GT "{node}: {desc}\n" -R ../nss-share
532 $ hg log -GT "{node}: {desc}\n" -R ../nss-share
512 warning: source repository supports share-safe functionality. Reshare to upgrade.
533 warning: source repository supports share-safe functionality. Reshare to upgrade.
513 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
534 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
514 |
535 |
515 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
536 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
516
537
517
538
518 Check that if lock is taken, upgrade fails but read operation are successful
539 Check that if lock is taken, upgrade fails but read operation are successful
519 $ touch ../nss-share/.hg/wlock
540 $ touch ../nss-share/.hg/wlock
520 $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config experimental.sharesafe-auto-upgrade-shares=true
541 $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config experimental.sharesafe-auto-upgrade-shares=true
521 failed to upgrade share, got error: Lock held
542 failed to upgrade share, got error: Lock held
522 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
543 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
523 |
544 |
524 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
545 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
525
546
526 $ rm ../nss-share/.hg/wlock
547 $ rm ../nss-share/.hg/wlock
527 $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config experimental.sharesafe-auto-upgrade-shares=true
548 $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config experimental.sharesafe-auto-upgrade-shares=true
528 repository upgraded to use share-safe mode
549 repository upgraded to use share-safe mode
529 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
550 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
530 |
551 |
531 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
552 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
532
553
533
554
534 Test that unshare works
555 Test that unshare works
535
556
536 $ hg unshare -R ../nss-share
557 $ hg unshare -R ../nss-share
537 $ hg log -GT "{node}: {desc}\n" -R ../nss-share
558 $ hg log -GT "{node}: {desc}\n" -R ../nss-share
538 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
559 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
539 |
560 |
540 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
561 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
541
562
General Comments 0
You need to be logged in to leave comments. Login now