persistent-nodemap: drop the storage.revlog.nodemap.mode config...
marmoute
r46938:44f2e95b default draft
@@ -1,2595 +1,2591 @@
# configitems.py - centralized declaration of configuration option
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools
import re

from . import (
    encoding,
    error,
)


def loadconfigtable(ui, extname, configtable):
    """update config items known to the ui with the extension ones"""
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            msg = b"extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config=b'warn-config')

        knownitems.update(items)
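
# For illustration: a configtable maps section names to dicts of configitem
# objects; an extension's table might look roughly like this (b'myext' and
# b'option' are hypothetical names, not taken from this file):
#
#     {b'myext': {b'option': configitem(b'myext', b'option', default=False)}}
#
# Extensions typically build such a table through registrar.configitem
# rather than by hand.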


class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition; names are matched using the
        regular expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        self._re = None
        if generic:
            self._re = re.compile(self.name)


class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        baseitem = super(itemregister, self).get(key)
        if baseitem is not None and not baseitem.generic:
            return baseitem

        # search for a matching generic item
        generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
        for item in generics:
            # we use 'match' instead of 'search' to make the matching simpler
            # for people unfamiliar with regular expressions. Having the match
            # rooted at the start of the string will produce less surprising
            # results for users writing simple regexes for sub-attributes.
            #
            # For example, using "color\..*" with match produces an
            # unsurprising result, while using search could suddenly match
            # apparently unrelated configuration that happens to contain
            # "color." anywhere. This is a tradeoff where we favor requiring
            # ".*" on some patterns to avoid the need to prefix most patterns
            # with "^". The "^" seems more error prone.
            if item._re.match(key):
                return item

        return None
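
# For illustration: given the generic item registered below as
# coreconfigitem(b'color', b'.*', ...), a lookup such as
# itemregister.get(b'color.diff.inserted') finds no exact entry and falls
# back to that generic item, whose compiled regex matches the key.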


coreitems = {}


def _register(configtable, *args, **kwargs):
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = b"duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item


# special value for case where the default is derived from other values
dynamicdefault = object()

# Registering actual config items


def getitemregister(configtable):
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f


coreconfigitem = getitemregister(coreitems)
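
# coreconfigitem is the partial application of _register on the coreitems
# table, so the declarations in the rest of this file all follow the pattern:
#
#     coreconfigitem(
#         b'section',
#         b'name',
#         default=...,
#     )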


def _registerdiffopts(section, configprefix=b''):
    coreconfigitem(
        section,
        configprefix + b'nodates',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'showfunc',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'unified',
        default=None,
    )
    coreconfigitem(
        section,
        configprefix + b'git',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorews',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorewsamount',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignoreblanklines',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorewseol',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'nobinary',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'noprefix',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'word-diff',
        default=False,
    )
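
# Note: the helper above is invoked below with various sections and prefixes,
# e.g. _registerdiffopts(section=b'annotate') yields annotate.nodates,
# annotate.showfunc, etc., and configprefix=b'commit.interactive.' yields
# commands.commit.interactive.git and friends.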


coreconfigitem(
    b'alias',
    b'.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'auth',
    b'cookiefile',
    default=None,
)
_registerdiffopts(section=b'annotate')
# bookmarks.pushing: internal hack for discovery
coreconfigitem(
    b'bookmarks',
    b'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem(
    b'bundle',
    b'mainreporoot',
    default=b'',
)
coreconfigitem(
    b'censor',
    b'policy',
    default=b'abort',
    experimental=True,
)
coreconfigitem(
    b'chgserver',
    b'idletimeout',
    default=3600,
)
coreconfigitem(
    b'chgserver',
    b'skiphash',
    default=False,
)
coreconfigitem(
    b'cmdserver',
    b'log',
    default=None,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-files',
    default=7,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-size',
    default=b'1 MB',
)
coreconfigitem(
    b'cmdserver',
    b'max-repo-cache',
    default=0,
    experimental=True,
)
coreconfigitem(
    b'cmdserver',
    b'message-encodings',
    default=list,
)
coreconfigitem(
    b'cmdserver',
    b'track-log',
    default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
)
coreconfigitem(
    b'cmdserver',
    b'shutdown-on-interrupt',
    default=True,
)
coreconfigitem(
    b'color',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'color',
    b'mode',
    default=b'auto',
)
coreconfigitem(
    b'color',
    b'pagermode',
    default=dynamicdefault,
)
coreconfigitem(
    b'command-templates',
    b'graphnode',
    default=None,
    alias=[(b'ui', b'graphnodetemplate')],
)
coreconfigitem(
    b'command-templates',
    b'log',
    default=None,
    alias=[(b'ui', b'logtemplate')],
)
coreconfigitem(
    b'command-templates',
    b'mergemarker',
    default=(
        b'{node|short} '
        b'{ifeq(tags, "tip", "", '
        b'ifeq(tags, "", "", "{tags} "))}'
        b'{if(bookmarks, "{bookmarks} ")}'
        b'{ifeq(branch, "default", "", "{branch} ")}'
        b'- {author|user}: {desc|firstline}'
    ),
    alias=[(b'ui', b'mergemarkertemplate')],
)
coreconfigitem(
    b'command-templates',
    b'pre-merge-tool-output',
    default=None,
    alias=[(b'ui', b'pre-merge-tool-output-template')],
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary',
    default=None,
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary.*',
    default=dynamicdefault,
    generic=True,
)
_registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
coreconfigitem(
    b'commands',
    b'commit.post-status',
    default=False,
)
coreconfigitem(
    b'commands',
    b'grep.all-files',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'merge.require-rev',
    default=False,
)
coreconfigitem(
    b'commands',
    b'push.require-revs',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.confirm',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.explicit-re-merge',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.mark-check',
    default=b'none',
)
_registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
coreconfigitem(
    b'commands',
    b'show.aliasprefix',
    default=list,
)
coreconfigitem(
    b'commands',
    b'status.relative',
    default=False,
)
coreconfigitem(
    b'commands',
    b'status.skipstates',
    default=[],
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'status.terse',
    default=b'',
)
coreconfigitem(
    b'commands',
    b'status.verbose',
    default=False,
)
coreconfigitem(
    b'commands',
    b'update.check',
    default=None,
)
coreconfigitem(
    b'commands',
    b'update.requiredest',
    default=False,
)
coreconfigitem(
    b'committemplate',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'convert',
    b'bzr.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.cache',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.fuzz',
    default=60,
)
coreconfigitem(
    b'convert',
    b'cvsps.logencoding',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergefrom',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergeto',
    default=None,
)
coreconfigitem(
    b'convert',
    b'git.committeractions',
    default=lambda: [b'messagedifferent'],
)
coreconfigitem(
    b'convert',
    b'git.extrakeys',
    default=list,
)
coreconfigitem(
    b'convert',
    b'git.findcopiesharder',
    default=False,
)
coreconfigitem(
    b'convert',
    b'git.remoteprefix',
    default=b'remote',
)
coreconfigitem(
    b'convert',
    b'git.renamelimit',
    default=400,
)
coreconfigitem(
    b'convert',
    b'git.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'git.similarity',
    default=50,
)
coreconfigitem(
    b'convert',
    b'git.skipsubmodules',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.clonebranches',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.ignoreerrors',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.preserve-hash',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.revs',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.saverev',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.sourcename',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.startrev',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.tagsbranch',
    default=b'default',
)
coreconfigitem(
    b'convert',
    b'hg.usebranchnames',
    default=True,
)
coreconfigitem(
    b'convert',
    b'ignoreancestorcheck',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'convert',
    b'localtimezone',
    default=False,
)
coreconfigitem(
    b'convert',
    b'p4.encoding',
    default=dynamicdefault,
)
coreconfigitem(
    b'convert',
    b'p4.startrev',
    default=0,
)
coreconfigitem(
    b'convert',
    b'skiptags',
    default=False,
)
coreconfigitem(
    b'convert',
    b'svn.debugsvnlog',
    default=True,
)
coreconfigitem(
    b'convert',
    b'svn.trunk',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.tags',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.branches',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.startrev',
    default=0,
)
coreconfigitem(
    b'debug',
    b'dirstate.delaywrite',
    default=0,
)
coreconfigitem(
    b'defaults',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'devel',
    b'all-warnings',
    default=False,
)
coreconfigitem(
    b'devel',
    b'bundle2.debug',
    default=False,
)
coreconfigitem(
    b'devel',
    b'bundle.delta',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'cache-vfs',
    default=None,
)
coreconfigitem(
    b'devel',
    b'check-locks',
    default=False,
)
coreconfigitem(
    b'devel',
    b'check-relroot',
    default=False,
)
coreconfigitem(
    b'devel',
    b'default-date',
    default=None,
)
coreconfigitem(
    b'devel',
    b'deprec-warn',
    default=False,
)
coreconfigitem(
    b'devel',
    b'disableloaddefaultcerts',
    default=False,
)
coreconfigitem(
    b'devel',
    b'warn-empty-changegroup',
    default=False,
)
coreconfigitem(
    b'devel',
    b'legacy.exchange',
    default=list,
)
# When True, revlogs use a special reference version of the nodemap, which is
# not performant but is "known" to behave properly.
coreconfigitem(
    b'devel',
    b'persistent-nodemap',
    default=False,
)
coreconfigitem(
    b'devel',
    b'servercafile',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'serverexactprotocol',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'serverrequirecert',
    default=False,
)
coreconfigitem(
    b'devel',
    b'strip-obsmarkers',
    default=True,
)
coreconfigitem(
    b'devel',
    b'warn-config',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-default',
    default=None,
)
coreconfigitem(
    b'devel',
    b'user.obsmarker',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-unknown',
    default=None,
)
coreconfigitem(
    b'devel',
    b'debug.copies',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.extensions',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.repo-filters',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.peer-request',
    default=False,
)
# If discovery.grow-sample is False, the sample size used in set discovery
# will not be increased throughout the process.
coreconfigitem(
    b'devel',
    b'discovery.grow-sample',
    default=True,
)
# discovery.grow-sample.rate controls the rate at which the sample grows
coreconfigitem(
    b'devel',
    b'discovery.grow-sample.rate',
    default=1.05,
)
# If discovery.randomize is False, random sampling during discovery is
# deterministic. It is meant for integration tests.
coreconfigitem(
    b'devel',
    b'discovery.randomize',
    default=True,
)
_registerdiffopts(section=b'diff')
coreconfigitem(
    b'email',
    b'bcc',
    default=None,
)
coreconfigitem(
    b'email',
    b'cc',
    default=None,
)
coreconfigitem(
    b'email',
    b'charsets',
    default=list,
)
coreconfigitem(
    b'email',
    b'from',
    default=None,
)
coreconfigitem(
    b'email',
    b'method',
    default=b'smtp',
)
coreconfigitem(
    b'email',
    b'reply-to',
    default=None,
)
coreconfigitem(
    b'email',
    b'to',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'archivemetatemplate',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'auto-publish',
    default=b'publish',
)
coreconfigitem(
    b'experimental',
    b'bundle-phases',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2-advertise',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'bundle2-output-capture',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2.pushback',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2lazylocking',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.bzip2',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.gzip',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.none',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.zstd',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'changegroup3',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'cleanup-as-archived',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'clientcompressionengines',
    default=list,
)
coreconfigitem(
    b'experimental',
    b'copytrace',
    default=b'on',
)
coreconfigitem(
    b'experimental',
    b'copytrace.movecandidateslimit',
    default=100,
)
coreconfigitem(
    b'experimental',
    b'copytrace.sourcecommitlimit',
    default=100,
)
coreconfigitem(
    b'experimental',
    b'copies.read-from',
    default=b"filelog-only",
)
coreconfigitem(
    b'experimental',
    b'copies.write-to',
    default=b'filelog-only',
)
coreconfigitem(
    b'experimental',
    b'crecordtest',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'directaccess',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'directaccess.revnums',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'editortmpinhg',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'evolution',
    default=list,
)
coreconfigitem(
    b'experimental',
    b'evolution.allowdivergence',
    default=False,
    alias=[(b'experimental', b'allowdivergence')],
)
coreconfigitem(
    b'experimental',
    b'evolution.allowunstable',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.createmarkers',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.effect-flags',
    default=True,
    alias=[(b'experimental', b'effect-flags')],
)
coreconfigitem(
    b'experimental',
    b'evolution.exchange',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.bundle-obsmarker',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'evolution.bundle-obsmarker:mandatory',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'log.topo',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'evolution.report-instabilities',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'evolution.track-operation',
    default=True,
)
# repo-level config to exclude a revset from visibility
#
# The target use case is to use `share` to expose different subsets of the
# same repository, especially server side. See also `server.view`.
coreconfigitem(
    b'experimental',
    b'extra-filter-revs',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'maxdeltachainspan',
    default=-1,
)
# tracks files which were undeleted (merge might delete them but we explicitly
# kept/undeleted them) and creates new filenodes for them
coreconfigitem(
    b'experimental',
    b'merge-track-salvaged',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'mergetempdirprefix',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'mmapindexthreshold',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'narrow',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'nonnormalparanoidcheck',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'exportableenviron',
    default=list,
)
coreconfigitem(
    b'experimental',
    b'extendedheader.index',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'extendedheader.similarity',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'graphshorten',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'graphstyle.parent',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'graphstyle.missing',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'graphstyle.grandparent',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'hook-track-tags',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'httppeer.advertise-v2',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'httppeer.v2-encoder-order',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'httppostargs',
    default=False,
)
coreconfigitem(b'experimental', b'nointerrupt', default=False)
coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)

coreconfigitem(
    b'experimental',
    b'obsmarkers-exchange-debug',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'remotenames',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'removeemptydirs',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'revert.interactive.select-to-keep',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'revisions.prefixhexnode',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'revlogv2',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'revisions.disambiguatewithin',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'rust.index',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'server.filesdata.recommended-batch-size',
    default=50000,
)
coreconfigitem(
    b'experimental',
    b'server.manifestdata.recommended-batch-size',
    default=100000,
)
coreconfigitem(
    b'experimental',
    b'server.stream-narrow-clones',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sharesafe-auto-downgrade-shares',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sharesafe-auto-upgrade-shares',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sharesafe-auto-upgrade-fail-error',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sharesafe-warn-outdated-shares',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch:account-closed-heads',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch:public-changes-only',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sshserver.support-v2',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sparse-read',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sparse-read.density-threshold',
    default=0.50,
)
coreconfigitem(
    b'experimental',
    b'sparse-read.min-gap-size',
    default=b'65K',
)
coreconfigitem(
    b'experimental',
    b'treemanifest',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'update.atomic-file',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sshpeer.advertise-v2',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'web.apiserver',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'web.api.http-v2',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'web.api.debugreflect',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'worker.wdir-get-thread-safe',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'worker.repository-upgrade',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'xdiff',
    default=False,
)
coreconfigitem(
    b'extensions',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'extdata',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'format',
    b'bookmarks-in-store',
    default=False,
)
coreconfigitem(
    b'format',
    b'chunkcachesize',
    default=None,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'dotencode',
    default=True,
)
coreconfigitem(
    b'format',
    b'generaldelta',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'manifestcachesize',
    default=None,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'maxchainlen',
    default=dynamicdefault,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'obsstore-version',
    default=None,
)
coreconfigitem(
    b'format',
    b'sparse-revlog',
    default=True,
)
coreconfigitem(
    b'format',
    b'revlog-compression',
    default=lambda: [b'zlib'],
    alias=[(b'experimental', b'format.compression')],
)
coreconfigitem(
    b'format',
    b'usefncache',
    default=True,
)
coreconfigitem(
    b'format',
    b'usegeneraldelta',
    default=True,
)
coreconfigitem(
    b'format',
    b'usestore',
    default=True,
)
# Right now, the only efficient implementation of the nodemap logic is in
# Rust.
#
# The case was discussed at the 5.6 sprint, and the following was decided for
# features that have an optional fast implementation (and are a performance
# regression without it):
#
# * If the fast implementation is not available, Mercurial will refuse to
#   access repositories that require it, pointing to proper documentation.
#
# * An option exists to lift that limitation and allow repository access.
#
#   Such access will emit a warning unless configured not to.
#
# * When sufficiently mature, the feature can be enabled by default, but only
#   for installations that support it.
coreconfigitem(
    b'format', b'use-persistent-nodemap', default=False, experimental=True
)
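
# For reference, a sketch of enabling the feature above from an hgrc
# (standard config syntax, not part of this file):
#
#     [format]
#     use-persistent-nodemap = yes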
1287 coreconfigitem(
1287 coreconfigitem(
1288 b'format',
1288 b'format',
1289 b'exp-use-copies-side-data-changeset',
1289 b'exp-use-copies-side-data-changeset',
1290 default=False,
1290 default=False,
1291 experimental=True,
1291 experimental=True,
1292 )
1292 )
1293 coreconfigitem(
1293 coreconfigitem(
1294 b'format',
1294 b'format',
1295 b'exp-use-side-data',
1295 b'exp-use-side-data',
1296 default=False,
1296 default=False,
1297 experimental=True,
1297 experimental=True,
1298 )
1298 )
1299 coreconfigitem(
1299 coreconfigitem(
1300 b'format',
1300 b'format',
1301 b'exp-share-safe',
1301 b'exp-share-safe',
1302 default=False,
1302 default=False,
1303 experimental=True,
1303 experimental=True,
1304 )
1304 )
1305 coreconfigitem(
1305 coreconfigitem(
1306 b'format',
1306 b'format',
1307 b'internal-phase',
1307 b'internal-phase',
1308 default=False,
1308 default=False,
1309 experimental=True,
1309 experimental=True,
1310 )
1310 )
1311 coreconfigitem(
1311 coreconfigitem(
1312 b'fsmonitor',
1312 b'fsmonitor',
1313 b'warn_when_unused',
1313 b'warn_when_unused',
1314 default=True,
1314 default=True,
1315 )
1315 )
1316 coreconfigitem(
1316 coreconfigitem(
1317 b'fsmonitor',
1317 b'fsmonitor',
1318 b'warn_update_file_count',
1318 b'warn_update_file_count',
1319 default=50000,
1319 default=50000,
1320 )
1320 )
1321 coreconfigitem(
1321 coreconfigitem(
1322 b'fsmonitor',
1322 b'fsmonitor',
1323 b'warn_update_file_count_rust',
1323 b'warn_update_file_count_rust',
1324 default=400000,
1324 default=400000,
1325 )
1325 )
1326 coreconfigitem(
1326 coreconfigitem(
1327 b'help',
1327 b'help',
1328 br'hidden-command\..*',
1328 br'hidden-command\..*',
1329 default=False,
1329 default=False,
1330 generic=True,
1330 generic=True,
1331 )
1331 )
1332 coreconfigitem(
1332 coreconfigitem(
1333 b'help',
1333 b'help',
1334 br'hidden-topic\..*',
1334 br'hidden-topic\..*',
1335 default=False,
1335 default=False,
1336 generic=True,
1336 generic=True,
1337 )
1337 )
1338 coreconfigitem(
1338 coreconfigitem(
1339 b'hooks',
1339 b'hooks',
1340 b'.*',
1340 b'.*',
1341 default=dynamicdefault,
1341 default=dynamicdefault,
1342 generic=True,
1342 generic=True,
1343 )
1343 )
coreconfigitem(
    b'hgweb-paths',
    b'.*',
    default=list,
    generic=True,
)
coreconfigitem(
    b'hostfingerprints',
    b'.*',
    default=list,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'ciphers',
    default=None,
)
coreconfigitem(
    b'hostsecurity',
    b'minimumprotocol',
    default=dynamicdefault,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:minimumprotocol$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:ciphers$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:fingerprints$',
    default=list,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:verifycertsfile$',
    default=None,
    generic=True,
)
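# Editor's note (illustration, not part of the original file): the
# .*:NAME$ patterns above provide per-host overrides. A hypothetical
# example ("example.com" is a placeholder):
#
#     [hostsecurity]
#     example.com:minimumprotocol = tls1.2
#     example.com:fingerprints = sha256:<hex-fingerprint>
#
# Only connections to that host pick up the stricter settings; the bare
# b'minimumprotocol' and b'ciphers' items remain the global defaults.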

coreconfigitem(
    b'http_proxy',
    b'always',
    default=False,
)
coreconfigitem(
    b'http_proxy',
    b'host',
    default=None,
)
coreconfigitem(
    b'http_proxy',
    b'no',
    default=list,
)
coreconfigitem(
    b'http_proxy',
    b'passwd',
    default=None,
)
coreconfigitem(
    b'http_proxy',
    b'user',
    default=None,
)

coreconfigitem(
    b'http',
    b'timeout',
    default=None,
)

coreconfigitem(
    b'logtoprocess',
    b'commandexception',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'commandfinish',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'command',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'develwarn',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'uiblocked',
    default=None,
)
coreconfigitem(
    b'merge',
    b'checkunknown',
    default=b'abort',
)
coreconfigitem(
    b'merge',
    b'checkignored',
    default=b'abort',
)
coreconfigitem(
    b'experimental',
    b'merge.checkpathconflicts',
    default=False,
)
coreconfigitem(
    b'merge',
    b'followcopies',
    default=True,
)
coreconfigitem(
    b'merge',
    b'on-failure',
    default=b'continue',
)
coreconfigitem(
    b'merge',
    b'preferancestor',
    default=lambda: [b'*'],
    experimental=True,
)
coreconfigitem(
    b'merge',
    b'strict-capability-check',
    default=False,
)
coreconfigitem(
    b'merge-tools',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.args$',
    default=b"$local $base $other",
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.binary$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.check$',
    default=list,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.checkchanged$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.executable$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.fixeol$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.gui$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkers$',
    default=b'basic',
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkertemplate$',
    default=dynamicdefault,  # take from command-templates.mergemarker
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.priority$',
    default=0,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.premerge$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.symlink$',
    default=False,
    generic=True,
    priority=-1,
)
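# Editor's note (illustration, not part of the original file): thanks to
# the generic declarations above, a user-defined tool only needs to set
# the keys it cares about; everything else falls back to the generic
# defaults. A hypothetical hgrc snippet ("mymeld" is a made-up name):
#
#     [merge-tools]
#     mymeld.executable = /usr/bin/meld
#     mymeld.args = $local $base $other
#     mymeld.gui = True
#
# mymeld.executable matches .*\.executable$ above, mymeld.args matches
# .*\.args$, and any key left unset (say mymeld.priority) takes the
# generic default declared here (0 for priority).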
coreconfigitem(
    b'pager',
    b'attend-.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'pager',
    b'ignore',
    default=list,
)
coreconfigitem(
    b'pager',
    b'pager',
    default=dynamicdefault,
)
coreconfigitem(
    b'patch',
    b'eol',
    default=b'strict',
)
coreconfigitem(
    b'patch',
    b'fuzz',
    default=2,
)
coreconfigitem(
    b'paths',
    b'default',
    default=None,
)
coreconfigitem(
    b'paths',
    b'default-push',
    default=None,
)
coreconfigitem(
    b'paths',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'phases',
    b'checksubrepos',
    default=b'follow',
)
coreconfigitem(
    b'phases',
    b'new-commit',
    default=b'draft',
)
coreconfigitem(
    b'phases',
    b'publish',
    default=True,
)
coreconfigitem(
    b'profiling',
    b'enabled',
    default=False,
)
coreconfigitem(
    b'profiling',
    b'format',
    default=b'text',
)
coreconfigitem(
    b'profiling',
    b'freq',
    default=1000,
)
coreconfigitem(
    b'profiling',
    b'limit',
    default=30,
)
coreconfigitem(
    b'profiling',
    b'nested',
    default=0,
)
coreconfigitem(
    b'profiling',
    b'output',
    default=None,
)
coreconfigitem(
    b'profiling',
    b'showmax',
    default=0.999,
)
coreconfigitem(
    b'profiling',
    b'showmin',
    default=dynamicdefault,
)
coreconfigitem(
    b'profiling',
    b'showtime',
    default=True,
)
coreconfigitem(
    b'profiling',
    b'sort',
    default=b'inlinetime',
)
coreconfigitem(
    b'profiling',
    b'statformat',
    default=b'hotpath',
)
coreconfigitem(
    b'profiling',
    b'time-track',
    default=dynamicdefault,
)
coreconfigitem(
    b'profiling',
    b'type',
    default=b'stat',
)
coreconfigitem(
    b'progress',
    b'assume-tty',
    default=False,
)
coreconfigitem(
    b'progress',
    b'changedelay',
    default=1,
)
coreconfigitem(
    b'progress',
    b'clear-complete',
    default=True,
)
coreconfigitem(
    b'progress',
    b'debug',
    default=False,
)
coreconfigitem(
    b'progress',
    b'delay',
    default=3,
)
coreconfigitem(
    b'progress',
    b'disable',
    default=False,
)
coreconfigitem(
    b'progress',
    b'estimateinterval',
    default=60.0,
)
coreconfigitem(
    b'progress',
    b'format',
    default=lambda: [b'topic', b'bar', b'number', b'estimate'],
)
coreconfigitem(
    b'progress',
    b'refresh',
    default=0.1,
)
coreconfigitem(
    b'progress',
    b'width',
    default=dynamicdefault,
)
coreconfigitem(
    b'pull',
    b'confirm',
    default=False,
)
coreconfigitem(
    b'push',
    b'pushvars.server',
    default=False,
)
coreconfigitem(
    b'rewrite',
    b'backup-bundle',
    default=True,
    alias=[(b'ui', b'history-editing-backup')],
)
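# Editor's note (illustration, not part of the original file): the alias
# above keeps the retired name working, so an existing
#
#     [ui]
#     history-editing-backup = no
#
# setting is still honoured when code reads the new name, along the lines
# of (a hedged sketch, not a quote from the codebase):
#
#     backup = repo.ui.configbool(b'rewrite', b'backup-bundle')
#
# The alias resolution happens inside the ui layer, not at the call site.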
coreconfigitem(
    b'rewrite',
    b'update-timestamp',
    default=False,
)
coreconfigitem(
    b'rewrite',
    b'empty-successor',
    default=b'skip',
    experimental=True,
)
coreconfigitem(
    b'storage',
    b'new-repo-backend',
    default=b'revlogv1',
    experimental=True,
)
coreconfigitem(
    b'storage',
    b'revlog.optimize-delta-parent-choice',
    default=True,
    alias=[(b'format', b'aggressivemergedeltas')],
)
# experimental as long as rust is experimental (or a C version is implemented)
coreconfigitem(
    b'storage',
    b'revlog.persistent-nodemap.mmap',
    default=True,
    experimental=True,
)
# experimental as long as format.use-persistent-nodemap is.
coreconfigitem(
-    b'storage', b'revlog.nodemap.mode', default=b'compat', experimental=True
-)
-# experimental as long as format.use-persistent-nodemap is.
-coreconfigitem(
    b'storage',
    b'revlog.persistent-nodemap.slow-path',
    default=b"abort",
    experimental=True,
)
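# Editor's note (illustration, not part of the original file): with the
# nodemap.mode knob removed above, the slow-path item is the remaining
# escape hatch; to my knowledge it accepts b'allow', b'warn' and b'abort'
# (the default declared here), e.g.:
#
#     [storage]
#     revlog.persistent-nodemap.slow-path = allow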

coreconfigitem(
    b'storage',
    b'revlog.reuse-external-delta',
    default=True,
)
coreconfigitem(
    b'storage',
    b'revlog.reuse-external-delta-parent',
    default=None,
)
coreconfigitem(
    b'storage',
    b'revlog.zlib.level',
    default=None,
)
coreconfigitem(
    b'storage',
    b'revlog.zstd.level',
    default=None,
)
coreconfigitem(
    b'server',
    b'bookmarks-pushkey-compat',
    default=True,
)
coreconfigitem(
    b'server',
    b'bundle1',
    default=True,
)
coreconfigitem(
    b'server',
    b'bundle1gd',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1.pull',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1gd.pull',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1.push',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1gd.push',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle2.stream',
    default=True,
    alias=[(b'experimental', b'bundle2.stream')],
)
coreconfigitem(
    b'server',
    b'compressionengines',
    default=list,
)
coreconfigitem(
    b'server',
    b'concurrent-push-mode',
    default=b'check-related',
)
coreconfigitem(
    b'server',
    b'disablefullbundle',
    default=False,
)
coreconfigitem(
    b'server',
    b'maxhttpheaderlen',
    default=1024,
)
coreconfigitem(
    b'server',
    b'pullbundle',
    default=False,
)
coreconfigitem(
    b'server',
    b'preferuncompressed',
    default=False,
)
coreconfigitem(
    b'server',
    b'streamunbundle',
    default=False,
)
coreconfigitem(
    b'server',
    b'uncompressed',
    default=True,
)
coreconfigitem(
    b'server',
    b'uncompressedallowsecret',
    default=False,
)
coreconfigitem(
    b'server',
    b'view',
    default=b'served',
)
coreconfigitem(
    b'server',
    b'validate',
    default=False,
)
coreconfigitem(
    b'server',
    b'zliblevel',
    default=-1,
)
coreconfigitem(
    b'server',
    b'zstdlevel',
    default=3,
)
coreconfigitem(
    b'share',
    b'pool',
    default=None,
)
coreconfigitem(
    b'share',
    b'poolnaming',
    default=b'identity',
)
coreconfigitem(
    b'shelve',
    b'maxbackups',
    default=10,
)
coreconfigitem(
    b'smtp',
    b'host',
    default=None,
)
coreconfigitem(
    b'smtp',
    b'local_hostname',
    default=None,
)
coreconfigitem(
    b'smtp',
    b'password',
    default=None,
)
coreconfigitem(
    b'smtp',
    b'port',
    default=dynamicdefault,
)
coreconfigitem(
    b'smtp',
    b'tls',
    default=b'none',
)
coreconfigitem(
    b'smtp',
    b'username',
    default=None,
)
coreconfigitem(
    b'sparse',
    b'missingwarning',
    default=True,
    experimental=True,
)
coreconfigitem(
    b'subrepos',
    b'allowed',
    default=dynamicdefault,  # to make backporting simpler
)
coreconfigitem(
    b'subrepos',
    b'hg:allowed',
    default=dynamicdefault,
)
coreconfigitem(
    b'subrepos',
    b'git:allowed',
    default=dynamicdefault,
)
coreconfigitem(
    b'subrepos',
    b'svn:allowed',
    default=dynamicdefault,
)
coreconfigitem(
    b'templates',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'templateconfig',
    b'.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'trusted',
    b'groups',
    default=list,
)
coreconfigitem(
    b'trusted',
    b'users',
    default=list,
)
coreconfigitem(
    b'ui',
    b'_usedassubrepo',
    default=False,
)
coreconfigitem(
    b'ui',
    b'allowemptycommit',
    default=False,
)
coreconfigitem(
    b'ui',
    b'archivemeta',
    default=True,
)
coreconfigitem(
    b'ui',
    b'askusername',
    default=False,
)
coreconfigitem(
    b'ui',
    b'available-memory',
    default=None,
)

coreconfigitem(
    b'ui',
    b'clonebundlefallback',
    default=False,
)
coreconfigitem(
    b'ui',
    b'clonebundleprefers',
    default=list,
)
coreconfigitem(
    b'ui',
    b'clonebundles',
    default=True,
)
coreconfigitem(
    b'ui',
    b'color',
    default=b'auto',
)
coreconfigitem(
    b'ui',
    b'commitsubrepos',
    default=False,
)
coreconfigitem(
    b'ui',
    b'debug',
    default=False,
)
coreconfigitem(
    b'ui',
    b'debugger',
    default=None,
)
coreconfigitem(
    b'ui',
    b'editor',
    default=dynamicdefault,
)
coreconfigitem(
    b'ui',
    b'detailed-exit-code',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'ui',
    b'fallbackencoding',
    default=None,
)
coreconfigitem(
    b'ui',
    b'forcecwd',
    default=None,
)
coreconfigitem(
    b'ui',
    b'forcemerge',
    default=None,
)
coreconfigitem(
    b'ui',
    b'formatdebug',
    default=False,
)
coreconfigitem(
    b'ui',
    b'formatjson',
    default=False,
)
coreconfigitem(
    b'ui',
    b'formatted',
    default=None,
)
coreconfigitem(
    b'ui',
    b'interactive',
    default=None,
)
coreconfigitem(
    b'ui',
    b'interface',
    default=None,
)
coreconfigitem(
    b'ui',
    b'interface.chunkselector',
    default=None,
)
coreconfigitem(
    b'ui',
    b'large-file-limit',
    default=10000000,
)
coreconfigitem(
    b'ui',
    b'logblockedtimes',
    default=False,
)
coreconfigitem(
    b'ui',
    b'merge',
    default=None,
)
coreconfigitem(
    b'ui',
    b'mergemarkers',
    default=b'basic',
)
coreconfigitem(
    b'ui',
    b'message-output',
    default=b'stdio',
)
coreconfigitem(
    b'ui',
    b'nontty',
    default=False,
)
coreconfigitem(
    b'ui',
    b'origbackuppath',
    default=None,
)
coreconfigitem(
    b'ui',
    b'paginate',
    default=True,
)
coreconfigitem(
    b'ui',
    b'patch',
    default=None,
)
coreconfigitem(
    b'ui',
    b'portablefilenames',
    default=b'warn',
)
coreconfigitem(
    b'ui',
    b'promptecho',
    default=False,
)
coreconfigitem(
    b'ui',
    b'quiet',
    default=False,
)
coreconfigitem(
    b'ui',
    b'quietbookmarkmove',
    default=False,
)
coreconfigitem(
    b'ui',
    b'relative-paths',
    default=b'legacy',
)
coreconfigitem(
    b'ui',
    b'remotecmd',
    default=b'hg',
)
coreconfigitem(
    b'ui',
    b'report_untrusted',
    default=True,
)
coreconfigitem(
    b'ui',
    b'rollback',
    default=True,
)
coreconfigitem(
    b'ui',
    b'signal-safe-lock',
    default=True,
)
coreconfigitem(
    b'ui',
    b'slash',
    default=False,
)
coreconfigitem(
    b'ui',
    b'ssh',
    default=b'ssh',
)
coreconfigitem(
    b'ui',
    b'ssherrorhint',
    default=None,
)
coreconfigitem(
    b'ui',
    b'statuscopies',
    default=False,
)
coreconfigitem(
    b'ui',
    b'strict',
    default=False,
)
coreconfigitem(
    b'ui',
    b'style',
    default=b'',
)
coreconfigitem(
    b'ui',
    b'supportcontact',
    default=None,
)
coreconfigitem(
    b'ui',
    b'textwidth',
    default=78,
)
coreconfigitem(
    b'ui',
    b'timeout',
    default=b'600',
)
coreconfigitem(
    b'ui',
    b'timeout.warn',
    default=0,
)
coreconfigitem(
    b'ui',
    b'timestamp-output',
    default=False,
)
coreconfigitem(
    b'ui',
    b'traceback',
    default=False,
)
coreconfigitem(
    b'ui',
    b'tweakdefaults',
    default=False,
)
coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
coreconfigitem(
    b'ui',
    b'verbose',
    default=False,
)
coreconfigitem(
    b'verify',
    b'skipflags',
    default=None,
)
coreconfigitem(
    b'web',
    b'allowbz2',
    default=False,
)
coreconfigitem(
    b'web',
    b'allowgz',
    default=False,
)
coreconfigitem(
    b'web',
    b'allow-pull',
    alias=[(b'web', b'allowpull')],
    default=True,
)
coreconfigitem(
    b'web',
    b'allow-push',
    alias=[(b'web', b'allow_push')],
    default=list,
)
coreconfigitem(
    b'web',
    b'allowzip',
    default=False,
)
coreconfigitem(
    b'web',
    b'archivesubrepos',
    default=False,
)
coreconfigitem(
    b'web',
    b'cache',
    default=True,
)
coreconfigitem(
    b'web',
    b'comparisoncontext',
    default=5,
)
coreconfigitem(
    b'web',
    b'contact',
    default=None,
)
coreconfigitem(
    b'web',
    b'deny_push',
    default=list,
)
coreconfigitem(
    b'web',
    b'guessmime',
    default=False,
)
coreconfigitem(
    b'web',
    b'hidden',
    default=False,
)
coreconfigitem(
    b'web',
    b'labels',
    default=list,
)
coreconfigitem(
    b'web',
    b'logoimg',
    default=b'hglogo.png',
)
coreconfigitem(
    b'web',
    b'logourl',
    default=b'https://mercurial-scm.org/',
)
coreconfigitem(
    b'web',
    b'accesslog',
    default=b'-',
)
coreconfigitem(
    b'web',
    b'address',
    default=b'',
)
coreconfigitem(
    b'web',
    b'allow-archive',
    alias=[(b'web', b'allow_archive')],
    default=list,
)
coreconfigitem(
    b'web',
    b'allow_read',
    default=list,
)
coreconfigitem(
    b'web',
    b'baseurl',
    default=None,
)
coreconfigitem(
    b'web',
    b'cacerts',
    default=None,
)
coreconfigitem(
    b'web',
    b'certificate',
    default=None,
)
coreconfigitem(
    b'web',
    b'collapse',
    default=False,
)
coreconfigitem(
    b'web',
    b'csp',
    default=None,
)
coreconfigitem(
    b'web',
    b'deny_read',
    default=list,
)
coreconfigitem(
    b'web',
    b'descend',
    default=True,
)
coreconfigitem(
    b'web',
    b'description',
    default=b"",
)
coreconfigitem(
    b'web',
    b'encoding',
    default=lambda: encoding.encoding,
)
coreconfigitem(
    b'web',
    b'errorlog',
    default=b'-',
)
coreconfigitem(
    b'web',
    b'ipv6',
    default=False,
)
coreconfigitem(
    b'web',
    b'maxchanges',
    default=10,
)
coreconfigitem(
    b'web',
    b'maxfiles',
    default=10,
)
coreconfigitem(
    b'web',
    b'maxshortchanges',
    default=60,
)
coreconfigitem(
    b'web',
    b'motd',
    default=b'',
)
coreconfigitem(
    b'web',
    b'name',
    default=dynamicdefault,
)
coreconfigitem(
    b'web',
    b'port',
    default=8000,
)
coreconfigitem(
    b'web',
    b'prefix',
    default=b'',
)
coreconfigitem(
    b'web',
    b'push_ssl',
    default=True,
)
coreconfigitem(
    b'web',
    b'refreshinterval',
    default=20,
)
coreconfigitem(
    b'web',
    b'server-header',
    default=None,
)
coreconfigitem(
    b'web',
    b'static',
    default=None,
)
coreconfigitem(
    b'web',
    b'staticurl',
    default=None,
)
coreconfigitem(
    b'web',
    b'stripes',
    default=1,
)
coreconfigitem(
    b'web',
    b'style',
    default=b'paper',
)
coreconfigitem(
    b'web',
    b'templates',
    default=None,
)
coreconfigitem(
    b'web',
    b'view',
    default=b'served',
    experimental=True,
)
coreconfigitem(
    b'worker',
    b'backgroundclose',
    default=dynamicdefault,
)
# Windows defaults to a limit of 512 open files. A buffer of 128
# should give us enough headway, hence the 512 - 128 = 384 default.
coreconfigitem(
    b'worker',
    b'backgroundclosemaxqueue',
    default=384,
)
coreconfigitem(
    b'worker',
    b'backgroundcloseminfilecount',
    default=2048,
)
coreconfigitem(
    b'worker',
    b'backgroundclosethreadcount',
    default=4,
)
coreconfigitem(
    b'worker',
    b'enabled',
    default=True,
)
coreconfigitem(
    b'worker',
    b'numcpus',
    default=None,
)

# Rebase related configuration moved to core because other extensions are
# doing strange things. For example, shelve imports the rebase extension to
# reuse some bits without formally loading it.
coreconfigitem(
    b'commands',
    b'rebase.requiredest',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'rebaseskipobsolete',
    default=True,
)
coreconfigitem(
    b'rebase',
    b'singletransaction',
    default=False,
)
coreconfigitem(
    b'rebase',
    b'experimental.inmemory',
    default=False,
)
@@ -1,3653 +1,3651 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import functools
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

from .revlogutils import constants as revlogconst

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()
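# Editor's note (illustration, not part of the original file): entries in
# this set look like (b'bookmarks', b'plain') for a path under .hg/ or
# (b'phaseroots', b'') for a store-relative path; the filecache classes
# below add to it as their instances are created.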


class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repos only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on an unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper


moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True


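# Editor's note (illustration, not part of the original file): a hedged
# sketch of how this executor is meant to be driven, using only the
# methods defined above; b'lookup' is one of the commands localpeer
# exposes below.
#
#     with localcommandexecutor(peer) as e:
#         f = e.callcommand(b'lookup', {b'key': b'tip'})
#         e.sendcommands()
#     node = f.result()
#
# Because callcommand() resolves the future eagerly, sendcommands() is a
# no-op here; it only exists to satisfy the executor interface shared
# with wire-protocol peers.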
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Beginning of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Beginning of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
367 ret = bundle2.getunbundler(self.ui, stream)
368 return ret
368 return ret
369 except Exception as exc:
369 except Exception as exc:
370 # If the exception contains output salvaged from a bundle2
370 # If the exception contains output salvaged from a bundle2
371 # reply, we need to make sure it is printed before continuing
371 # reply, we need to make sure it is printed before continuing
372 # to fail. So we build a bundle2 with such output and consume
372 # to fail. So we build a bundle2 with such output and consume
373 # it directly.
373 # it directly.
374 #
374 #
375 # This is not very elegant but allows a "simple" solution for
375 # This is not very elegant but allows a "simple" solution for
376 # issue4594
376 # issue4594
377 output = getattr(exc, '_bundle2salvagedoutput', ())
377 output = getattr(exc, '_bundle2salvagedoutput', ())
378 if output:
378 if output:
379 bundler = bundle2.bundle20(self._repo.ui)
379 bundler = bundle2.bundle20(self._repo.ui)
380 for out in output:
380 for out in output:
381 bundler.addpart(out)
381 bundler.addpart(out)
382 stream = util.chunkbuffer(bundler.getchunks())
382 stream = util.chunkbuffer(bundler.getchunks())
383 b = bundle2.getunbundler(self.ui, stream)
383 b = bundle2.getunbundler(self.ui, stream)
384 bundle2.processbundle(self._repo, b)
384 bundle2.processbundle(self._repo, b)
385 raise
385 raise
386 except error.PushRaced as exc:
386 except error.PushRaced as exc:
387 raise error.ResponseError(
387 raise error.ResponseError(
388 _(b'push failed:'), stringutil.forcebytestr(exc)
388 _(b'push failed:'), stringutil.forcebytestr(exc)
389 )
389 )
390
390
391 # End of _basewirecommands interface.
391 # End of _basewirecommands interface.
392
392
393 # Begin of peer interface.
393 # Begin of peer interface.
394
394
395 def commandexecutor(self):
395 def commandexecutor(self):
396 return localcommandexecutor(self)
396 return localcommandexecutor(self)
397
397
398 # End of peer interface.
398 # End of peer interface.
399
399
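# Illustrative sketch: a local peer wraps a repository object and answers
# wire-protocol commands in-process. Assuming an already-opened `repo`:
#
#     peer = localpeer(repo)
#     heads = peer.heads()    # same answer as repo.filtered(b'served').heads()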

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

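# Illustrative sketch: an extension advertising a custom requirement by
# registering a feature setup function (the extension and requirement
# names here are hypothetical):
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-myext-requirement')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)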

def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is the vfs pointing at the .hg/ of the current (shared) repo
    requirements is the set of requirements of the current (shared) repo
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = hgvfs.join(sharedpath)

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs

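# For example (sketch), a share created with `hg share --relative` has
# `relshared` in its .hg/requires and a .hg/sharedpath whose content is a
# path relative to .hg/, along the lines of:
#
#     ../../main-repo/.hg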

def _readrequires(vfs, allowmissing):
    """reads the requires file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress ENOENT if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(vfs.read(b'requires').splitlines())
    except IOError as e:
        if not (allowmissing and e.errno == errno.ENOENT):
            raise
        requirements = set()
    return requirements
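
# A typical modern `requires` file (sketch) contains entries such as:
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     sparserevlog
#     store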


def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
    # requirement is present. We never write SHARESAFE_REQUIREMENT for a
    # repo if the store requirement is not present; refer to
    # checkrequirementscompat() for that.
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:

        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            if ui.configbool(
                b'experimental', b'sharesafe-auto-downgrade-shares'
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                )
            else:
                raise error.Abort(
                    _(
                        b"share source does not support exp-sharesafe requirement"
                    )
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            if ui.configbool(b'experimental', b'sharesafe-auto-upgrade-shares'):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                )
            elif ui.configbool(
                b'experimental', b'sharesafe-warn-outdated-shares'
            ):
                ui.warn(
                    _(
                        b'warning: source repository supports share-safe functionality.'
                        b' Reshare to upgrade.\n'
                    )
                )

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If the `shared` or
    # `relshared` requirement is present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`.
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is a vfs object pointing to the source repo if the current
    one is a shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from the shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret
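
# Illustrative sketch: an extension reading configuration from an extra,
# hypothetical file by monkeypatching this function:
#
#     def myloadhgrc(orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
#         ret = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             ret = True
#         except IOError:
#             pass
#         return ret
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', myloadhgrc)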


def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')

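# For example (sketch), opening a repository whose .hg/requires lists `lfs`
# behaves as if the user had configured:
#
#     [extensions]
#     lfs =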

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(
                path, vfstype, b'dotencode' in requirements
            )

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)
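
# Summary of the requirements-to-store mapping implemented above:
#
#     'store' and 'fncache' -> fncachestore (honoring 'dotencode')
#     'store' only          -> encodedstore
#     neither               -> basicstore (very old repositories)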


def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        b'revlogv1' in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])
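
    # Example hgrc (sketch) selecting explicit compression levels; values
    # must stay within the ranges validated above (zlib: 0-9, zstd: 0-22):
    #
    #     [storage]
    #     revlog.zlib.level = 6
    #     revlog.zstd.level = 3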

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options

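# Example hgrc (sketch) enabling the persistent nodemap and requiring the
# fast implementation (valid slow-path values: "allow", "warn", "abort"):
#
#     [format]
#     use-persistent-nodemap = yes
#
#     [storage]
#     revlog.persistent-nodemap.slow-path = abort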

def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage
1128
1126
1129
1127
1130 # List of repository interfaces and factory functions for them. Each
1128 # List of repository interfaces and factory functions for them. Each
1131 # will be called in order during ``makelocalrepository()`` to iteratively
1129 # will be called in order during ``makelocalrepository()`` to iteratively
1132 # derive the final type for a local repository instance. We capture the
1130 # derive the final type for a local repository instance. We capture the
1133 # function as a lambda so we don't hold a reference and the module-level
1131 # function as a lambda so we don't hold a reference and the module-level
1134 # functions can be wrapped.
1132 # functions can be wrapped.
1135 REPO_INTERFACES = [
1133 REPO_INTERFACES = [
1136 (repository.ilocalrepositorymain, lambda: makemain),
1134 (repository.ilocalrepositorymain, lambda: makemain),
1137 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1135 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1138 ]
1136 ]
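

# Sketch of how ``makelocalrepository()`` consumes this list (condensed and
# illustrative rather than authoritative; the real loop passes much more
# state to each factory so extensions can hook in):
#
#   bases = []
#   for iface, fn in REPO_INTERFACES:
#       typ = fn()(requirements=requirements, features=features)
#       bases.append(typ)
#   cls = type('derivedrepo', tuple(bases), {})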


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    #  - manifestv2: An experimental new manifest format that allowed
    #    for stem compression of long paths. Experiment ended up not
    #    being successful (repository sizes went up due to worse delta
    #    chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        requirementsmod.TREEMANIFEST_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SIDEDATA_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        b'dotencode',
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that the ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening
           requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
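
    # Usage sketch (illustrative only; view names are defined in
    # ``repoview``):
    #
    #   served = repo.filtered(b'served')  # e.g. hide secret changesets
    #   for rev in served.revs(b'all()'):
    #       ...  # iterate only over revisions visible in that view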

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside the transaction, the changelog is updated to content B
        # 3) outside the transaction, the bookmarks file is updated to refer
        #    to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are
        # dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light": the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would
        # be refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not enough,
        # because at the time we build the content for `_bookmarks` in (4),
        # the changelog file has already diverged from the content used for
        # loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still
        # happen without the lock (with a narrower window), but it would no
        # longer go undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cachestat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the
        #    data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depends on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid a race, see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
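
    # Usage sketch (illustrative only; the pattern and walk call are
    # assumptions for the example, not code from this module): intersect a
    # caller-provided matcher with the narrowspec before walking files.
    #
    #   m = repo.narrowmatch(matchmod.match(repo.root, b'', [b'src/**']))
    #   for f in repo[b'.'].manifest().walk(m):
    #       ...  # only files inside both the patterns and the narrowspec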

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast-path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
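
    # Lookup forms accepted above (an illustrative summary of the branches
    # in ``__getitem__``; ``node`` and ``hexnode`` are placeholder names):
    #
    #   repo[None]     # workingctx for the working directory
    #   repo[0]        # changectx for revision number 0
    #   repo[b'.']     # first parent of the working directory
    #   repo[b'tip']   # tip of the (possibly filtered) changelog
    #   repo[node]     # 20-byte binary node id
    #   repo[hexnode]  # 40-byte hex node id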

    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
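
    # Usage sketch (illustrative only; %-escapes per
    # ``revsetlang.formatspec``, e.g. %d for an int, %s for bytes, %ln for
    # a list of nodes):
    #
    #   for rev in repo.revs(b'ancestors(%d) and not public()', 42):
    #       ...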

    def set(self, expr, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
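
    # Usage sketch (illustrative only; 'mine' and 'alice' are made-up
    # names): match the union of several revsets, with user aliases
    # expanded and a local alias supplied on the fly.
    #
    #   revs = repo.anyrevs(
    #       [b'mine', b'draft()'],
    #       user=True,
    #       localalias={b'mine': b'author(alice)'},
    #   )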

    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)
1978
1976
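    # Note (added for clarity): tag names cross an encoding boundary here.
    # A minimal sketch of the conversion, assuming a UTF-8 cache entry:
    #   name_utf8 = b'release-\xc3\xa9'           # as stored by the tags module
    #   name_local = encoding.tolocal(name_utf8)  # what the rest of hg sees
    # encoding.tolocal() is the conversion actually used above.
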
    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

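    # For example (illustrative, not from the source): after
    #   hg tag -l snapshot
    # tagtype(b'snapshot') returns b'local', while a tag committed to
    # .hgtags yields b'global' and an unknown name yields None.
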
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

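    # nodetags() is the reverse index of tags(): built once per tagscache
    # instance, it maps node -> sorted tag names. A hypothetical node
    # carrying both b'v1.0' and b'stable' would therefore yield
    # [b'stable', b'v1.0'] (sorted, not insertion order).
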
    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

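    # Illustrative contrast (not part of the source): with
    # ignoremissing=False, branchtip(b'no-such-branch') raises
    # RepoLookupError; with ignoremissing=True it falls through the
    # except clause and implicitly returns None.
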
    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

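    # known() answers membership per node, preserving input order. A
    # hypothetical call with one known and one bogus node, e.g.
    #   repo.known([tip_node, b'\xff' * 20])
    # returns [True, False]: a node counts as unknown if it is absent
    # from the index or hidden by the current filter.
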
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

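    # The ``flags`` argument above follows the manifest convention:
    # b'l' marks a symlink, b'x' an executable file, b'' a plain file.
    # For instance (illustrative), wwrite(b'run.sh', data, b'x') writes
    # the file and then sets its executable bit via wvfs.setflags().
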
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
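        # The transaction id is thus b'TXN:' plus 40 hex characters, e.g.
        # (illustrative) b'TXN:9f86d081884c7d65...'; it is derived from
        # random.random() and time.time(), so it is unique per run but
        # deliberately not reproducible.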
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (e.g. phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        # <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        # "-R": tag is removed,
        # "+A": tag is added,
        # "-M": tag is moved (old value),
        # "+M": tag is moved (new value),
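        # For example (illustrative), a tag b'v1.2' moved from one node to
        # another would produce two lines in tags.changes:
        #   -M 9f3c7a... v1.2
        #   +M 1b2d4e... v1.2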
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As the fncache is used for
        # streaming clones, this is not expected to break anything that
        # happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

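    # undoname() maps each journal file to its post-transaction backup, so
    # (illustrative) (self.svfs, b'journal.phaseroots') becomes
    # (self.svfs, b'undo.phaseroots'); these are the files consulted by
    # rollback() and recover() below.
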
    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
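        # journal.desc holds two lines: the repository length (revision
        # count) before the transaction and the transaction description,
        # e.g. (illustrative) b"42\ncommit\n"; _rollback() parses this via
        # splitlines() to report what is being undone.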
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

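    # recover() and rollback() are deliberately asymmetric: recover()
    # replays the still-present "journal" of an interrupted transaction,
    # while rollback() below undoes the last *completed* transaction from
    # its "undo" files.
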
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

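    # A sketch of the cache-warming flow, under the assumptions above: the
    # post-close callback built by _buildcacheupdater() ends up calling
    #   repo.updatecaches(tr)
    # so branch caches are refreshed whenever a transaction added
    # revisions, and `full=True` (used e.g. by `hg debugupdatecaches`)
    # additionally forces every repoview filter's branchmap to disk.
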
    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

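The two docstrings above pin down the required acquisition order. A minimal sketch of how calling code is expected to take both locks (the `commit` method below follows the same pattern); `repo` is assumed to be an already loaded localrepository instance:

    # Sketch only: take 'wlock' strictly before 'lock', per the rule above.
    with repo.wlock(), repo.lock():
        # both the working copy and the store may be modified here
        pass
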
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already be stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

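For illustration, a hedged sketch of driving `commit()` programmatically rather than through the command line; the repository path, user, and message below are hypothetical:

    # Sketch only: a programmatic commit through the API above.
    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'/path/to/repo')  # hypothetical path
    node = repo.commit(text=b'example message', user=b'example <ex@example.org>')
    if node is None:
        ui.status(b'nothing to commit\n')  # empty commits are refused by default
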
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

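`between` walks first parents from `top` toward `bottom`, recording the nodes it passes at exponentially growing distances (1, 2, 4, 8, ...); the legacy wire protocol uses these samples to binary-search for a common ancestor. The sampling pattern can be reproduced with plain integers standing in for a first-parent chain:

    # Sketch only: integers stand in for nodes; "first parent" is n - 1.
    def sample_between(top, bottom):
        n, l, i = top, [], 0
        f = 1
        while n != bottom and n > 0:
            p = n - 1  # first parent of the stand-in node
            if i == f:
                l.append(n)
                f = f * 2
            n = p
            i += 1
        return l

    print(sample_between(100, 0))  # [99, 98, 96, 92, 84, 68, 36]
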
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

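A hedged example of exercising `pushkey` from the API side; the `bookmarks` namespace is a standard one, while the bookmark name and target below are hypothetical:

    # Sketch only: move/create a bookmark through the pushkey interface.
    from mercurial.node import hex

    old = b''                       # empty old value means "create"
    new = hex(repo[b'tip'].node())  # hypothetical target revision
    ok = repo.pushkey(b'bookmarks', b'@', old, new)
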
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


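`savecommitmessage` is what lets the `commit` method above print a recovery hint after a failed transaction: the draft message survives in `.hg/last-message.txt`. A short sketch, assuming a loaded `repo`:

    # Sketch only: persist a draft message and get its user-facing path.
    msgfn = repo.savecommitmessage(b'draft commit message')
    # msgfn points at .hg/last-message.txt (relative to the repo root),
    # which now contains the draft.
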
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True

def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts

def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'exp-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return requirements

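A hedged sketch of inspecting what `newreporequirements` would produce for a given configuration; the config override below is illustrative:

    # Sketch only: compute the requirements a fresh repository would get.
    from mercurial import localrepo, ui as uimod

    ui = uimod.ui.load()
    ui.setconfig(b'format', b'use-persistent-nodemap', b'yes')
    createopts = localrepo.defaultcreateopts(ui)
    reqs = localrepo.newreporequirements(ui, createopts)
    # 'persistent-nodemap' should now appear alongside the usual
    # 'revlogv1'/'store'/'fncache'/'dotencode'/'generaldelta' entries.
    print(sorted(reqs))
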
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because the
    requirements they depend on are not enabled. Also warns users about it."""

    dropped = set()

    if b'store' not in requirements:
        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b"ignoring enabled 'format.exp-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
            )
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped

def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}

def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write the store requirements.
    # For a new shared repository, we don't need to write them,
    # as they are already present in the source's store requires file.
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

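A hedged usage sketch for the creation helpers above; the target path and options are hypothetical:

    # Sketch only: create and then open a repository programmatically.
    from mercurial import localrepo, ui as uimod

    ui = uimod.ui.load()
    localrepo.createrepository(ui, b'/tmp/newrepo', createopts={b'lfs': True})
    repo = localrepo.instance(ui, b'/tmp/newrepo', create=False)
    assert b'lfs' in repo.requirements
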
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
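The effect of the poisoning can be demonstrated with a stand-in object; only `close()` survives, and every other attribute access raises. A minimal sketch, assuming it runs in this module's context:

    # Sketch only: any object with a close() method can be poisoned.
    class _dummyrepo(object):
        def close(self):
            pass

    repo = _dummyrepo()
    poisonrepository(repo)
    repo.close()  # still permitted, and a no-op
    try:
        repo.changelog  # any other attribute access now fails
    except error.ProgrammingError:
        pass
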
@@ -1,644 +1,636 b''
# nodemap.py - nodemap related code and utilities
#
# Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
# Copyright 2019 George Racinet <georges.racinet@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import re
import struct

-from ..i18n import _
from ..node import hex

from .. import (
    error,
    util,
)


class NodeMap(dict):
    def __missing__(self, x):
        raise error.RevlogError(b'unknown node: %s' % x)


def persisted_data(revlog):
    """read the nodemap for a revlog from disk"""
    if revlog.nodemap_file is None:
        return None
    pdata = revlog.opener.tryread(revlog.nodemap_file)
    if not pdata:
        return None
    offset = 0
    (version,) = S_VERSION.unpack(pdata[offset : offset + S_VERSION.size])
    if version != ONDISK_VERSION:
        return None
    offset += S_VERSION.size
    headers = S_HEADER.unpack(pdata[offset : offset + S_HEADER.size])
    uid_size, tip_rev, data_length, data_unused, tip_node_size = headers
    offset += S_HEADER.size
    docket = NodeMapDocket(pdata[offset : offset + uid_size])
    offset += uid_size
    docket.tip_rev = tip_rev
    docket.tip_node = pdata[offset : offset + tip_node_size]
    docket.data_length = data_length
    docket.data_unused = data_unused

    filename = _rawdata_filepath(revlog, docket)
    use_mmap = revlog.opener.options.get(b"persistent-nodemap.mmap")
    try:
        with revlog.opener(filename) as fd:
            if use_mmap:
                data = util.buffer(util.mmapread(fd, data_length))
            else:
                data = fd.read(data_length)
    except OSError as e:
        if e.errno == errno.ENOENT:
            return None
        else:
            raise
    if len(data) < data_length:
        return None
    return docket, data

def setup_persistent_nodemap(tr, revlog):
    """Install whatever is needed transaction side to persist a nodemap on disk

    (only actually persist the nodemap if this is relevant for this revlog)
    """
    if revlog._inline:
        return  # inlined revlogs are too small for this to be relevant
    if revlog.nodemap_file is None:
        return  # we do not use persistent_nodemap on this revlog

    # This needs to happen after the changelog finalization; finalize
    # callbacks run in id order and the changelog one uses a "cl-" prefix,
    # which sorts before our "nm-" prefix.
    callback_id = b"nm-revlog-persistent-nodemap-%s" % revlog.nodemap_file
    if tr.hasfinalize(callback_id):
        return  # no need to register again
    tr.addpending(
        callback_id, lambda tr: _persist_nodemap(tr, revlog, pending=True)
    )
    tr.addfinalize(callback_id, lambda tr: _persist_nodemap(tr, revlog))

class _NoTransaction(object):
    """transaction like object to update the nodemap outside a transaction"""

    def __init__(self):
        self._postclose = {}

    def addpostclose(self, callback_id, callback_func):
        self._postclose[callback_id] = callback_func

    def registertmp(self, *args, **kwargs):
        pass

    def addbackup(self, *args, **kwargs):
        pass

    def add(self, *args, **kwargs):
        pass

    def addabort(self, *args, **kwargs):
        pass

    def _report(self, *args):
        pass


def update_persistent_nodemap(revlog):
    """update the persistent nodemap right now

    To be used for updating the nodemap on disk outside of a normal transaction
    setup (eg, `debugupdatecache`).
    """
    if revlog._inline:
        return  # inlined revlogs are too small for this to be relevant
    if revlog.nodemap_file is None:
        return  # we do not use persistent_nodemap on this revlog

    notr = _NoTransaction()
    _persist_nodemap(notr, revlog)
    for k in sorted(notr._postclose):
        notr._postclose[k](None)

def _persist_nodemap(tr, revlog, pending=False):
    """Write nodemap data on disk for a given revlog"""
    if getattr(revlog, 'filteredrevs', ()):
        raise error.ProgrammingError(
            "cannot persist nodemap of a filtered changelog"
        )
    if revlog.nodemap_file is None:
        msg = "calling persist nodemap on a revlog without the feature enabled"
        raise error.ProgrammingError(msg)

    can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
    ondisk_docket = revlog._nodemap_docket
    feed_data = util.safehasattr(revlog.index, "update_nodemap_data")
    use_mmap = revlog.opener.options.get(b"persistent-nodemap.mmap")
-    mode = revlog.opener.options.get(b"persistent-nodemap.mode")
-    if not can_incremental:
-        msg = _(b"persistent nodemap in strict mode without efficient method")
-        if mode == b'warn':
-            tr._report(b"%s\n" % msg)
-        elif mode == b'strict':
-            raise error.Abort(msg)

    data = None
    # first attempt an incremental update of the data
    if can_incremental and ondisk_docket is not None:
        target_docket = revlog._nodemap_docket.copy()
        (
            src_docket,
            data_changed_count,
            data,
        ) = revlog.index.nodemap_data_incremental()
        new_length = target_docket.data_length + len(data)
        new_unused = target_docket.data_unused + data_changed_count
        if src_docket != target_docket:
            data = None
        elif new_length <= (new_unused * 10):
            # at least 10% of the data is unused; fall back to a full rewrite
            data = None
        else:
            datafile = _rawdata_filepath(revlog, target_docket)
            # EXP-TODO: if this is a cache, this should use a cache vfs, not a
            # store vfs
            tr.add(datafile, target_docket.data_length)
            with revlog.opener(datafile, b'r+') as fd:
                fd.seek(target_docket.data_length)
                fd.write(data)
                if feed_data:
                    if use_mmap:
                        fd.flush()
                        new_data = util.buffer(util.mmapread(fd, new_length))
                    else:
                        fd.seek(0)
                        new_data = fd.read(new_length)
            target_docket.data_length = new_length
            target_docket.data_unused = new_unused

    if data is None:
        # otherwise fallback to a full new export
        target_docket = NodeMapDocket()
        datafile = _rawdata_filepath(revlog, target_docket)
        if util.safehasattr(revlog.index, "nodemap_data_all"):
            data = revlog.index.nodemap_data_all()
        else:
            data = persistent_data(revlog.index)
        # EXP-TODO: if this is a cache, this should use a cache vfs, not a
        # store vfs

        tryunlink = revlog.opener.tryunlink

        def abortck(tr):
            tryunlink(datafile)

        callback_id = b"delete-%s" % datafile

        # some flavors of the transaction abort do not clean up new files;
        # they simply empty them.
        tr.addabort(callback_id, abortck)
        with revlog.opener(datafile, b'w+') as fd:
            fd.write(data)
            if feed_data:
                if use_mmap:
                    fd.flush()
                    new_data = util.buffer(util.mmapread(fd, len(data)))
                else:
                    new_data = data
        target_docket.data_length = len(data)
    target_docket.tip_rev = revlog.tiprev()
    target_docket.tip_node = revlog.node(target_docket.tip_rev)
    # EXP-TODO: if this is a cache, this should use a cache vfs, not a
    # store vfs
    file_path = revlog.nodemap_file
    if pending:
        file_path += b'.a'
        tr.registertmp(file_path)
    else:
        tr.addbackup(file_path)

    with revlog.opener(file_path, b'w', atomictemp=True) as fp:
        fp.write(target_docket.serialize())
    revlog._nodemap_docket = target_docket
    if feed_data:
        revlog.index.update_nodemap_data(target_docket, new_data)

    # search for old data files in all cases; some older process might have
    # left one behind.
    olds = _other_rawdata_filepath(revlog, target_docket)
    if olds:
        realvfs = getattr(revlog, '_realopener', revlog.opener)

        def cleanup(tr):
            for oldfile in olds:
                realvfs.tryunlink(oldfile)

        callback_id = b"revlog-cleanup-nodemap-%s" % revlog.nodemap_file
        tr.addpostclose(callback_id, cleanup)


### Nodemap docket file
#
# The nodemap data are stored on disk using 2 files:
#
# * a raw data file containing a persistent nodemap
#   (see `Nodemap Trie` section)
#
# * a small "docket" file containing metadata
#
# While the nodemap data can be multiple tens of megabytes, the "docket" is
# small, so it is easy to update it automatically or to duplicate its content
# during a transaction.
#
# Multiple raw data files can exist at the same time (the currently valid one
# and a new one being used by an in-progress transaction). To accommodate
# this, the filename hosting the raw data has a variable part. The exact
# filename is specified inside the "docket" file.
#
# The docket file contains information to find, qualify and validate the raw
# data. Its content is currently very light, but it will expand as the on-disk
# nodemap gains the necessary features to be used in production.

ONDISK_VERSION = 1
S_VERSION = struct.Struct(">B")
S_HEADER = struct.Struct(">BQQQQ")

ID_SIZE = 8

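# Illustrative sketch (not part of the original module): with the formats
# above, a serialized docket is one version byte, a 33-byte header (">BQQQQ"
# is 1 + 4 * 8 bytes), then the uid and the tip node. The field values used
# below are hypothetical.
def _example_docket_layout():
    docket = NodeMapDocket()
    docket.tip_rev = 0
    docket.tip_node = b'\x11' * 20
    docket.data_length = 64
    raw = docket.serialize()
    assert raw[:1] == S_VERSION.pack(ONDISK_VERSION)
    header = S_HEADER.unpack(raw[1 : 1 + S_HEADER.size])
    # fields: uid_size, tip_rev, data_length, data_unused, tip_node_size
    assert header == (2 * ID_SIZE, 0, 64, 0, 20)
    assert len(raw) == S_VERSION.size + S_HEADER.size + 2 * ID_SIZE + 20
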
def _make_uid():
    """return a new unique identifier.

    The identifier is random and composed of ascii characters."""
    return hex(os.urandom(ID_SIZE))

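# Illustrative sketch (not part of the original module): a uid is the hex
# form of ID_SIZE random bytes, so 16 ascii characters for ID_SIZE == 8.
def _example_uid():
    uid = _make_uid()
    assert len(uid) == 2 * ID_SIZE
    int(uid, 16)  # parses as hexadecimal, raising ValueError otherwise
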
class NodeMapDocket(object):
    """metadata associated with persistent nodemap data

    The persistent data may come from disk or be on their way to disk.
    """

    def __init__(self, uid=None):
        if uid is None:
            uid = _make_uid()
        # a unique identifier for the data file:
        # - When new data are appended, it is preserved.
        # - When a new data file is created, a new identifier is generated.
        self.uid = uid
        # the tipmost revision stored in the data file. This revision and all
        # revisions before it are expected to be encoded in the data file.
        self.tip_rev = None
        # the node of that tipmost revision; if it mismatches the current
        # index data, the docket is not valid for the current index and
        # should be discarded.
        #
        # note: this check is not perfect as some destructive operations
        # could preserve the same tip_rev + tip_node while altering lower
        # revisions. However, multiple other caches have the same
        # vulnerability (eg: branchmap cache).
        self.tip_node = None
        # the size (in bytes) of the persisted data to encode the nodemap
        # valid for `tip_rev`.
        # - data files shorter than this are corrupted,
        # - any extra data should be ignored.
        self.data_length = None
        # the amount (in bytes) of "dead" data, still in the data file but no
        # longer used for the nodemap.
        self.data_unused = 0

    def copy(self):
        new = NodeMapDocket(uid=self.uid)
        new.tip_rev = self.tip_rev
        new.tip_node = self.tip_node
        new.data_length = self.data_length
        new.data_unused = self.data_unused
        return new

    def __cmp__(self, other):
        if self.uid < other.uid:
            return -1
        if self.uid > other.uid:
            return 1
        elif self.data_length < other.data_length:
            return -1
        elif self.data_length > other.data_length:
            return 1
        return 0

    def __eq__(self, other):
        return self.uid == other.uid and self.data_length == other.data_length

    def serialize(self):
        """return serialized bytes for a docket using the passed uid"""
        data = []
        data.append(S_VERSION.pack(ONDISK_VERSION))
        headers = (
            len(self.uid),
            self.tip_rev,
            self.data_length,
            self.data_unused,
            len(self.tip_node),
        )
        data.append(S_HEADER.pack(*headers))
        data.append(self.uid)
        data.append(self.tip_node)
        return b''.join(data)


def _rawdata_filepath(revlog, docket):
    """The (vfs relative) nodemap's rawdata file for a given uid"""
    if revlog.nodemap_file.endswith(b'.n.a'):
        prefix = revlog.nodemap_file[:-4]
    else:
        prefix = revlog.nodemap_file[:-2]
    return b"%s-%s.nd" % (prefix, docket.uid)

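# Illustrative sketch (not part of the original module): the raw data file
# sits next to its docket and embeds the docket uid in its name. The class
# below is a hypothetical stand-in for a revlog.
def _example_rawdata_name():
    class _FakeRevlog(object):
        nodemap_file = b'00changelog.n'

    docket = NodeMapDocket(uid=b'0123456789abcdef')
    assert _rawdata_filepath(_FakeRevlog(), docket) == (
        b'00changelog-0123456789abcdef.nd'
    )
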
def _other_rawdata_filepath(revlog, docket):
    prefix = revlog.nodemap_file[:-2]
    pattern = re.compile(br"(^|/)%s-[0-9a-f]+\.nd$" % prefix)
    new_file_path = _rawdata_filepath(revlog, docket)
    new_file_name = revlog.opener.basename(new_file_path)
    dirpath = revlog.opener.dirname(new_file_path)
    others = []
    for f in revlog.opener.listdir(dirpath):
        if pattern.match(f) and f != new_file_name:
            others.append(f)
    return others


### Nodemap Trie
#
# This is a simple reference implementation to compute and persist a nodemap
# trie. This reference implementation is write only. The python version of
# this is not expected to be actually used, since it won't provide performance
# improvements over the existing non-persistent C implementation.
#
# The nodemap is persisted as a Trie using 4-bit addresses and 16-entry
# blocks. Each revision can be addressed using its node's shortest prefix.
#
# The trie is stored as a sequence of blocks. Each block contains 16 entries
# (signed 32bit integers, big endian; see S_BLOCK below). Each entry can be
# one of the following:
#
# * value >= 0 -> index of sub-block
# * value == -1 -> no value
# * value < -1 -> encoded revision: rev = -(value+2)
#
# See REV_OFFSET and _transform_rev below.
#
# The implementation focuses on simplicity, not on performance. A Rust
# implementation should provide an efficient version of the same binary
# persistence. This reference python implementation is never meant to be
# extensively used in production.


def persistent_data(index):
    """return the persistent binary form for a nodemap for a given index"""
    trie = _build_trie(index)
    return _persist_trie(trie)


def update_persistent_data(index, root, max_idx, last_rev):
    """return the incremental update for persistent nodemap from a given index"""
    changed_block, trie = _update_trie(index, root, last_rev)
    return (
        changed_block * S_BLOCK.size,
        _persist_trie(trie, existing_idx=max_idx),
    )


S_BLOCK = struct.Struct(">" + ("l" * 16))

NO_ENTRY = -1
# rev 0 needs to be -2 because 0 is used by blocks and -1 is a special value.
REV_OFFSET = 2


def _transform_rev(rev):
    """Return the number used to represent the rev in the tree.

    (or retrieve a rev number from such representation)

    Note that this is an involution, a function equal to its inverse (i.e.
    which gives the identity when applied to itself).
    """
    return -(rev + REV_OFFSET)

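# Illustrative sketch (not part of the original module) of the encoding
# implemented by _transform_rev: revisions occupy the integers below -1,
# leaving 0 and above for sub-block indexes and -1 for NO_ENTRY. Since the
# function is an involution, it also decodes what it encodes.
def _example_transform_rev():
    assert _transform_rev(0) == -2  # the smallest revision maps to -2
    assert _transform_rev(41) == -43
    # applying the function twice gives back the original value
    assert _transform_rev(_transform_rev(41)) == 41
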
def _to_int(hex_digit):
    """turn a hexadecimal digit into a proper integer"""
    return int(hex_digit, 16)


class Block(dict):
    """represent a block of the Trie

    contains up to 16 entries indexed from 0 to 15"""

    def __init__(self):
        super(Block, self).__init__()
        # If this block exists on disk, here is its ID
        self.ondisk_id = None

    def __iter__(self):
        return iter(self.get(i) for i in range(16))


def _build_trie(index):
    """build a nodemap trie

    The nodemap stores a revision number for each unique prefix.

    Each block is a dictionary with keys in `[0, 15]`. Values are either
    another block or a revision number.
    """
    root = Block()
    for rev in range(len(index)):
        current_hex = hex(index[rev][7])
        _insert_into_block(index, 0, root, rev, current_hex)
    return root

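# Illustrative sketch (not part of the original module) of the resulting
# structure, assuming a toy index where entry [7] holds the binary node, as
# in revlog index tuples. Two nodes sharing their first nibble force a
# sub-block one level down.
def _example_build_trie():
    node_a = b'\xab' + b'\x00' * 19  # hex form starts with b"ab"
    node_b = b'\xac' + b'\x00' * 19  # hex form starts with b"ac"
    index = [(None,) * 7 + (node_a,), (None,) * 7 + (node_b,)]
    root = _build_trie(index)
    sub = root[_to_int(b'a')]  # both nodes share the 0xa nibble
    assert isinstance(sub, Block)
    assert sub[_to_int(b'b')] == 0  # rev 0, reachable via prefix "ab"
    assert sub[_to_int(b'c')] == 1  # rev 1, reachable via prefix "ac"
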
def _update_trie(index, root, last_rev):
    """insert all revisions that appeared after `last_rev` into the trie"""
    changed = 0
    for rev in range(last_rev + 1, len(index)):
        current_hex = hex(index[rev][7])
        changed += _insert_into_block(index, 0, root, rev, current_hex)
    return changed, root


def _insert_into_block(index, level, block, current_rev, current_hex):
    """insert a new revision in a block

    index: the index we are adding revisions for
    level: the depth of the current block in the trie
    block: the block currently being considered
    current_rev: the revision number we are adding
    current_hex: the hexadecimal representation of the node of that revision
    """
    changed = 1
    if block.ondisk_id is not None:
        block.ondisk_id = None
    hex_digit = _to_int(current_hex[level : level + 1])
    entry = block.get(hex_digit)
    if entry is None:
        # no entry, simply store the revision number
        block[hex_digit] = current_rev
    elif isinstance(entry, dict):
        # need to recurse to an underlying block
        changed += _insert_into_block(
            index, level + 1, entry, current_rev, current_hex
        )
    else:
        # collision with a previously unique prefix, inserting new
        # vertices to fit both entries.
        other_hex = hex(index[entry][7])
        other_rev = entry
        new = Block()
        block[hex_digit] = new
        _insert_into_block(index, level + 1, new, other_rev, other_hex)
        _insert_into_block(index, level + 1, new, current_rev, current_hex)
    return changed


def _persist_trie(root, existing_idx=None):
    """turn a nodemap trie into persistent binary data

    See `_build_trie` for nodemap trie structure"""
    block_map = {}
    if existing_idx is not None:
        base_idx = existing_idx + 1
    else:
        base_idx = 0
    chunks = []
    for tn in _walk_trie(root):
        if tn.ondisk_id is not None:
            block_map[id(tn)] = tn.ondisk_id
        else:
            block_map[id(tn)] = len(chunks) + base_idx
            chunks.append(_persist_block(tn, block_map))
    return b''.join(chunks)


def _walk_trie(block):
    """yield all the blocks in a trie

    Children blocks are always yielded before their parent block.
    """
    for (__, item) in sorted(block.items()):
        if isinstance(item, dict):
            for sub_block in _walk_trie(item):
                yield sub_block
    yield block


def _persist_block(block_node, block_map):
    """produce persistent binary data for a single block

    Children blocks are assumed to be already persisted and present in
    block_map.
    """
    data = tuple(_to_value(v, block_map) for v in block_node)
    return S_BLOCK.pack(*data)


def _to_value(item, block_map):
    """persist any value as an integer"""
    if item is None:
        return NO_ENTRY
    elif isinstance(item, dict):
        return block_map[id(item)]
    else:
        return _transform_rev(item)


def parse_data(data):
    """parse nodemap data into a nodemap Trie"""
    if (len(data) % S_BLOCK.size) != 0:
        msg = "nodemap data size is not a multiple of block size (%d): %d"
        raise error.Abort(msg % (S_BLOCK.size, len(data)))
    if not data:
        return Block(), None
    block_map = {}
    new_blocks = []
    for i in range(0, len(data), S_BLOCK.size):
        block = Block()
        block.ondisk_id = len(block_map)
        block_map[block.ondisk_id] = block
        block_data = data[i : i + S_BLOCK.size]
        values = S_BLOCK.unpack(block_data)
        new_blocks.append((block, values))
    for b, values in new_blocks:
        for idx, v in enumerate(values):
            if v == NO_ENTRY:
                continue
            elif v >= 0:
                b[idx] = block_map[v]
            else:
                b[idx] = _transform_rev(v)
    return block, i // S_BLOCK.size

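# Illustrative sketch (not part of the original module): persisting a trie
# and parsing it back preserves its content. Same toy-index assumption as
# above; the node values are hypothetical.
def _example_persistence_roundtrip():
    node_a = b'\xab' + b'\x00' * 19
    node_b = b'\xac' + b'\x00' * 19
    index = [(None,) * 7 + (node_a,), (None,) * 7 + (node_b,)]
    data = persistent_data(index)
    # one 64-byte block for the shared "a" prefix, one for the root
    assert len(data) == 2 * S_BLOCK.size
    root, last_idx = parse_data(data)
    assert last_idx == 1  # index of the last (root) block
    assert sorted(_all_revisions(root)) == [0, 1]
    assert _find_node(root, hex(node_a)) == 0
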
# debug utility


def check_data(ui, index, data):
    """verify that the provided nodemap data are valid for the given index"""
    ret = 0
    ui.status((b"revision in index: %d\n") % len(index))
    root, __ = parse_data(data)
    all_revs = set(_all_revisions(root))
    ui.status((b"revision in nodemap: %d\n") % len(all_revs))
    for r in range(len(index)):
        if r not in all_revs:
            msg = b" revision missing from nodemap: %d\n" % r
            ui.write_err(msg)
            ret = 1
        else:
            all_revs.remove(r)
            nm_rev = _find_node(root, hex(index[r][7]))
            if nm_rev is None:
                msg = b" revision node does not match any entries: %d\n" % r
                ui.write_err(msg)
                ret = 1
            elif nm_rev != r:
                msg = (
                    b" revision node does not match the expected revision: "
                    b"%d != %d\n" % (r, nm_rev)
                )
                ui.write_err(msg)
                ret = 1

    if all_revs:
        for r in sorted(all_revs):
            msg = b" extra revision in nodemap: %d\n" % r
            ui.write_err(msg)
            ret = 1
    return ret


def _all_revisions(root):
    """return all revisions stored in a Trie"""
    for block in _walk_trie(root):
        for v in block:
            if v is None or isinstance(v, Block):
                continue
            yield v


def _find_node(block, node):
    """find the revision associated with a given node"""
    entry = block.get(_to_int(node[0:1]))
    if isinstance(entry, dict):
        return _find_node(entry, node[1:])
    return entry
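# Illustrative sketch (not part of the original module): looking a node up
# walks one block per hexadecimal digit until the prefix becomes unique.
# The 20-byte node below is a hypothetical value.
def _example_find_node():
    node = b'\xab' + b'\x00' * 19
    index = [(None,) * 7 + (node,)]  # toy stand-in for a revlog index
    root = _build_trie(index)
    # the full hex form works, and so would any unambiguous prefix
    assert _find_node(root, hex(node)) == 0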