dirstate-v2: Add a new experimental `exp-dirstate-v2` repository requirement...
Simon Sapin
r48052:ed0d54b2 default
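In short, the patch registers a new `format.exp-dirstate-v2` configuration item (default False, flagged experimental) to go with the new `exp-dirstate-v2` repository requirement. As the comment added in the diff notes, the option is only consulted when creating a new repository; existing repositories keep whatever format is recorded in .hg/requires. A minimal sketch of reading the item through a ui object (editorial illustration, assuming a Mercurial build that includes this change):

    from mercurial import ui as uimod

    u = uimod.ui.load()
    # False unless an hgrc opts in with "exp-dirstate-v2 = true" under [format]
    print(u.configbool(b'format', b'exp-dirstate-v2'))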
@@ -1,2711 +1,2719 @@
# configitems.py - centralized declaration of configuration option
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools
import re

from . import (
    encoding,
    error,
)


def loadconfigtable(ui, extname, configtable):
    """update config items known to the ui with the extension ones"""
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            msg = b"extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config=b'warn-config')

        knownitems.update(items)


class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition; the name is matched as a regular expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        self._re = None
        if generic:
            self._re = re.compile(self.name)


class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        baseitem = super(itemregister, self).get(key)
        if baseitem is not None and not baseitem.generic:
            return baseitem

        # search for a matching generic item
        generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
        for item in generics:
            # we use 'match' instead of 'search' to make the matching simpler
            # for people unfamiliar with regular expressions. Having the match
            # rooted at the start of the string produces less surprising
            # results for users writing simple regexes for sub-attributes.
            #
            # For example, using "color\..*" with match produces an unsurprising
            # result, while using search could suddenly match apparently
            # unrelated configuration that happens to contain "color."
            # anywhere. This is a tradeoff where we favor requiring ".*" on
            # some patterns to avoid the need to prefix most patterns with "^".
            # The "^" seems more error prone.
            if item._re.match(key):
                return item

        return None
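# [editorial sketch, not part of the patch] How the generic fallback in
# itemregister.get() resolves, using only the classes defined above:
#
#     reg = itemregister()
#     reg[b'.*'] = configitem(b'color', b'.*', default=None, generic=True)
#     reg[b'mode'] = configitem(b'color', b'mode', default=b'auto')
#     assert reg.get(b'mode').default == b'auto'  # exact, non-generic item wins
#     assert reg.get(b'pagermode').generic        # falls back to the b'.*' regex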


coreitems = {}


def _register(configtable, *args, **kwargs):
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = b"duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item


# special value for the case where the default is derived from other values
dynamicdefault = object()

# Registering actual config items


def getitemregister(configtable):
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f


coreconfigitem = getitemregister(coreitems)
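# [editorial sketch, not part of the patch] getitemregister() is the same
# factory an extension would use for its own table; core merges extension
# tables back in through loadconfigtable() at extension load time:
#
#     configtable = {}
#     configitem = getitemregister(configtable)
#     configitem(b'myext', b'some-option', default=False)  # hypothetical names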


def _registerdiffopts(section, configprefix=b''):
    coreconfigitem(
        section,
        configprefix + b'nodates',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'showfunc',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'unified',
        default=None,
    )
    coreconfigitem(
        section,
        configprefix + b'git',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorews',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorewsamount',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignoreblanklines',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorewseol',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'nobinary',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'noprefix',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'word-diff',
        default=False,
    )
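# [editorial sketch, not part of the patch] Each _registerdiffopts(...) call
# below expands to eleven registrations; e.g. the annotate one is equivalent to:
#
#     for name in (b'nodates', b'showfunc', b'git', b'ignorews',
#                  b'ignorewsamount', b'ignoreblanklines', b'ignorewseol',
#                  b'nobinary', b'noprefix', b'word-diff'):
#         coreconfigitem(b'annotate', name, default=False)
#     coreconfigitem(b'annotate', b'unified', default=None)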


coreconfigitem(
    b'alias',
    b'.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'auth',
    b'cookiefile',
    default=None,
)
_registerdiffopts(section=b'annotate')
# bookmarks.pushing: internal hack for discovery
coreconfigitem(
    b'bookmarks',
    b'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem(
    b'bundle',
    b'mainreporoot',
    default=b'',
)
coreconfigitem(
    b'censor',
    b'policy',
    default=b'abort',
    experimental=True,
)
coreconfigitem(
    b'chgserver',
    b'idletimeout',
    default=3600,
)
coreconfigitem(
    b'chgserver',
    b'skiphash',
    default=False,
)
coreconfigitem(
    b'cmdserver',
    b'log',
    default=None,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-files',
    default=7,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-size',
    default=b'1 MB',
)
coreconfigitem(
    b'cmdserver',
    b'max-repo-cache',
    default=0,
    experimental=True,
)
coreconfigitem(
    b'cmdserver',
    b'message-encodings',
    default=list,
)
coreconfigitem(
    b'cmdserver',
    b'track-log',
    default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
)
coreconfigitem(
    b'cmdserver',
    b'shutdown-on-interrupt',
    default=True,
)
coreconfigitem(
    b'color',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'color',
    b'mode',
    default=b'auto',
)
coreconfigitem(
    b'color',
    b'pagermode',
    default=dynamicdefault,
)
coreconfigitem(
    b'command-templates',
    b'graphnode',
    default=None,
    alias=[(b'ui', b'graphnodetemplate')],
)
coreconfigitem(
    b'command-templates',
    b'log',
    default=None,
    alias=[(b'ui', b'logtemplate')],
)
coreconfigitem(
    b'command-templates',
    b'mergemarker',
    default=(
        b'{node|short} '
        b'{ifeq(tags, "tip", "", '
        b'ifeq(tags, "", "", "{tags} "))}'
        b'{if(bookmarks, "{bookmarks} ")}'
        b'{ifeq(branch, "default", "", "{branch} ")}'
        b'- {author|user}: {desc|firstline}'
    ),
    alias=[(b'ui', b'mergemarkertemplate')],
)
coreconfigitem(
    b'command-templates',
    b'pre-merge-tool-output',
    default=None,
    alias=[(b'ui', b'pre-merge-tool-output-template')],
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary',
    default=None,
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary.*',
    default=dynamicdefault,
    generic=True,
)
_registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
coreconfigitem(
    b'commands',
    b'commit.post-status',
    default=False,
)
coreconfigitem(
    b'commands',
    b'grep.all-files',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'merge.require-rev',
    default=False,
)
coreconfigitem(
    b'commands',
    b'push.require-revs',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.confirm',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.explicit-re-merge',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.mark-check',
    default=b'none',
)
_registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
coreconfigitem(
    b'commands',
    b'show.aliasprefix',
    default=list,
)
coreconfigitem(
    b'commands',
    b'status.relative',
    default=False,
)
coreconfigitem(
    b'commands',
    b'status.skipstates',
    default=[],
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'status.terse',
    default=b'',
)
coreconfigitem(
    b'commands',
    b'status.verbose',
    default=False,
)
coreconfigitem(
    b'commands',
    b'update.check',
    default=None,
)
coreconfigitem(
    b'commands',
    b'update.requiredest',
    default=False,
)
coreconfigitem(
    b'committemplate',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'convert',
    b'bzr.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.cache',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.fuzz',
    default=60,
)
coreconfigitem(
    b'convert',
    b'cvsps.logencoding',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergefrom',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergeto',
    default=None,
)
coreconfigitem(
    b'convert',
    b'git.committeractions',
    default=lambda: [b'messagedifferent'],
)
coreconfigitem(
    b'convert',
    b'git.extrakeys',
    default=list,
)
coreconfigitem(
    b'convert',
    b'git.findcopiesharder',
    default=False,
)
coreconfigitem(
    b'convert',
    b'git.remoteprefix',
    default=b'remote',
)
coreconfigitem(
    b'convert',
    b'git.renamelimit',
    default=400,
)
coreconfigitem(
    b'convert',
    b'git.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'git.similarity',
    default=50,
)
coreconfigitem(
    b'convert',
    b'git.skipsubmodules',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.clonebranches',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.ignoreerrors',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.preserve-hash',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.revs',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.saverev',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.sourcename',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.startrev',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.tagsbranch',
    default=b'default',
)
coreconfigitem(
    b'convert',
    b'hg.usebranchnames',
    default=True,
)
coreconfigitem(
    b'convert',
    b'ignoreancestorcheck',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'convert',
    b'localtimezone',
    default=False,
)
coreconfigitem(
    b'convert',
    b'p4.encoding',
    default=dynamicdefault,
)
coreconfigitem(
    b'convert',
    b'p4.startrev',
    default=0,
)
coreconfigitem(
    b'convert',
    b'skiptags',
    default=False,
)
coreconfigitem(
    b'convert',
    b'svn.debugsvnlog',
    default=True,
)
coreconfigitem(
    b'convert',
    b'svn.trunk',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.tags',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.branches',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.startrev',
    default=0,
)
coreconfigitem(
    b'convert',
    b'svn.dangerous-set-commit-dates',
    default=False,
)
coreconfigitem(
    b'debug',
    b'dirstate.delaywrite',
    default=0,
)
coreconfigitem(
    b'debug',
    b'revlog.verifyposition.changelog',
    default=b'',
)
coreconfigitem(
    b'defaults',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'devel',
    b'all-warnings',
    default=False,
)
coreconfigitem(
    b'devel',
    b'bundle2.debug',
    default=False,
)
coreconfigitem(
    b'devel',
    b'bundle.delta',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'cache-vfs',
    default=None,
)
coreconfigitem(
    b'devel',
    b'check-locks',
    default=False,
)
coreconfigitem(
    b'devel',
    b'check-relroot',
    default=False,
)
# Track copy information for all files, not just "added" ones (very slow)
coreconfigitem(
    b'devel',
    b'copy-tracing.trace-all-files',
    default=False,
)
coreconfigitem(
    b'devel',
    b'default-date',
    default=None,
)
coreconfigitem(
    b'devel',
    b'deprec-warn',
    default=False,
)
coreconfigitem(
    b'devel',
    b'disableloaddefaultcerts',
    default=False,
)
coreconfigitem(
    b'devel',
    b'warn-empty-changegroup',
    default=False,
)
coreconfigitem(
    b'devel',
    b'legacy.exchange',
    default=list,
)
# When True, revlogs use a special reference version of the nodemap that is not
# performant but is "known" to behave properly.
coreconfigitem(
    b'devel',
    b'persistent-nodemap',
    default=False,
)
coreconfigitem(
    b'devel',
    b'servercafile',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'serverexactprotocol',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'serverrequirecert',
    default=False,
)
coreconfigitem(
    b'devel',
    b'strip-obsmarkers',
    default=True,
)
coreconfigitem(
    b'devel',
    b'warn-config',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-default',
    default=None,
)
coreconfigitem(
    b'devel',
    b'user.obsmarker',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-unknown',
    default=None,
)
coreconfigitem(
    b'devel',
    b'debug.copies',
    default=False,
)
coreconfigitem(
    b'devel',
    b'copy-tracing.multi-thread',
    default=True,
)
coreconfigitem(
    b'devel',
    b'debug.extensions',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.repo-filters',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.peer-request',
    default=False,
)
# If discovery.exchange-heads is False, the discovery will not start with
# remote head fetching and local head querying.
coreconfigitem(
    b'devel',
    b'discovery.exchange-heads',
    default=True,
)
# If discovery.grow-sample is False, the sample size used in set discovery will
# not be increased through the process.
coreconfigitem(
    b'devel',
    b'discovery.grow-sample',
    default=True,
)
# When discovery.grow-sample.dynamic is True (the default), the sample size is
# adapted to the shape of the undecided set: it is set to the max of
# <target-size>, len(roots(undecided)), and len(heads(undecided)).
coreconfigitem(
    b'devel',
    b'discovery.grow-sample.dynamic',
    default=True,
)
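# [editorial sketch, not part of the patch] The sizing rule described in the
# comment above, as a formula (hypothetical helper name):
#
#     def _dynamic_sample_size(target_size, undecided_roots, undecided_heads):
#         # widen the sample when the undecided set has many roots or heads
#         return max(target_size, len(undecided_roots), len(undecided_heads))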
# discovery.grow-sample.rate controls the rate at which the sample grows
coreconfigitem(
    b'devel',
    b'discovery.grow-sample.rate',
    default=1.05,
)
# If discovery.randomize is False, random sampling during discovery is
# deterministic. It is meant for integration tests.
coreconfigitem(
    b'devel',
    b'discovery.randomize',
    default=True,
)
# Control the initial size of the discovery sample
coreconfigitem(
    b'devel',
    b'discovery.sample-size',
    default=200,
)
# Control the initial size of the discovery sample for the initial change
coreconfigitem(
    b'devel',
    b'discovery.sample-size.initial',
    default=100,
)
_registerdiffopts(section=b'diff')
coreconfigitem(
    b'diff',
    b'merge',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'email',
    b'bcc',
    default=None,
)
coreconfigitem(
    b'email',
    b'cc',
    default=None,
)
coreconfigitem(
    b'email',
    b'charsets',
    default=list,
)
coreconfigitem(
    b'email',
    b'from',
    default=None,
)
coreconfigitem(
    b'email',
    b'method',
    default=b'smtp',
)
coreconfigitem(
    b'email',
    b'reply-to',
    default=None,
)
coreconfigitem(
    b'email',
    b'to',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'archivemetatemplate',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'auto-publish',
    default=b'publish',
)
coreconfigitem(
    b'experimental',
    b'bundle-phases',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2-advertise',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'bundle2-output-capture',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2.pushback',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2lazylocking',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.bzip2',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.gzip',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.none',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.zstd',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.bzip2',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.gzip',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.none',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.zstd',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'changegroup3',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'changegroup4',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'cleanup-as-archived',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'clientcompressionengines',
    default=list,
)
coreconfigitem(
    b'experimental',
    b'copytrace',
    default=b'on',
)
coreconfigitem(
    b'experimental',
    b'copytrace.movecandidateslimit',
    default=100,
)
coreconfigitem(
    b'experimental',
    b'copytrace.sourcecommitlimit',
    default=100,
)
coreconfigitem(
    b'experimental',
    b'copies.read-from',
    default=b"filelog-only",
)
coreconfigitem(
    b'experimental',
    b'copies.write-to',
    default=b'filelog-only',
)
coreconfigitem(
    b'experimental',
    b'crecordtest',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'directaccess',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'directaccess.revnums',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'dirstate-tree.in-memory',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'editortmpinhg',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'evolution',
    default=list,
)
coreconfigitem(
    b'experimental',
    b'evolution.allowdivergence',
    default=False,
    alias=[(b'experimental', b'allowdivergence')],
)
coreconfigitem(
    b'experimental',
    b'evolution.allowunstable',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.createmarkers',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.effect-flags',
    default=True,
    alias=[(b'experimental', b'effect-flags')],
)
coreconfigitem(
    b'experimental',
    b'evolution.exchange',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.bundle-obsmarker',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'evolution.bundle-obsmarker:mandatory',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'log.topo',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'evolution.report-instabilities',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'evolution.track-operation',
    default=True,
)
# repo-level config to exclude a revset from visibility
#
# The target use case is to use `share` to expose different subsets of the same
# repository, especially server side. See also `server.view`.
coreconfigitem(
    b'experimental',
    b'extra-filter-revs',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'maxdeltachainspan',
    default=-1,
)
# tracks files which were undeleted (merge might delete them but we explicitly
# kept/undeleted them) and creates new filenodes for them
coreconfigitem(
    b'experimental',
    b'merge-track-salvaged',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'mergetempdirprefix',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'mmapindexthreshold',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'narrow',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'nonnormalparanoidcheck',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'exportableenviron',
    default=list,
)
coreconfigitem(
    b'experimental',
    b'extendedheader.index',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'extendedheader.similarity',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'graphshorten',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'graphstyle.parent',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'graphstyle.missing',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'graphstyle.grandparent',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'hook-track-tags',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'httppeer.advertise-v2',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'httppeer.v2-encoder-order',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'httppostargs',
    default=False,
)
coreconfigitem(b'experimental', b'nointerrupt', default=False)
coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)

coreconfigitem(
    b'experimental',
    b'obsmarkers-exchange-debug',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'remotenames',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'removeemptydirs',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'revert.interactive.select-to-keep',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'revisions.prefixhexnode',
    default=False,
)
# "out of experimental" todo list.
#
# * include management of a persistent nodemap in the main docket
# * enforce a "no-truncate" policy for mmap safety
#   - for censoring operation
#   - for stripping operation
#   - for rollback operation
# * proper streaming (race free) of the docket file
# * track garbage data to eventually allow rewriting -existing- sidedata.
# * Exchange-wise, we will also need to do something more efficient than
#   keeping references to the affected revlogs, especially memory-wise when
#   rewriting sidedata.
# * sidedata compression
# * introduce a proper solution to reduce the number of filelog related files.
# * Improvement to consider
#   - avoid compression header in chunk using the default compression?
#   - forbid "inline" compression mode entirely?
#   - split the data offset and flag field (the 2 bytes save are mostly trouble)
#   - keep track of uncompressed -chunk- size (to preallocate memory better)
#   - keep track of chain base or size (probably not that useful anymore)
#   - store data and sidedata in different files
coreconfigitem(
    b'experimental',
    b'revlogv2',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'revisions.disambiguatewithin',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'rust.index',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'server.filesdata.recommended-batch-size',
    default=50000,
)
coreconfigitem(
    b'experimental',
    b'server.manifestdata.recommended-batch-size',
    default=100000,
)
coreconfigitem(
    b'experimental',
    b'server.stream-narrow-clones',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch:account-closed-heads',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch:public-changes-only',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sshserver.support-v2',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sparse-read',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sparse-read.density-threshold',
    default=0.50,
)
coreconfigitem(
    b'experimental',
    b'sparse-read.min-gap-size',
    default=b'65K',
)
coreconfigitem(
    b'experimental',
    b'treemanifest',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'update.atomic-file',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sshpeer.advertise-v2',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'web.apiserver',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'web.api.http-v2',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'web.api.debugreflect',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'worker.wdir-get-thread-safe',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'worker.repository-upgrade',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'xdiff',
    default=False,
)
coreconfigitem(
    b'extensions',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'extdata',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'format',
    b'bookmarks-in-store',
    default=False,
)
coreconfigitem(
    b'format',
    b'chunkcachesize',
    default=None,
    experimental=True,
)
coreconfigitem(
+    # Enable this dirstate format *when creating a new repository*.
+    # Which format to use for existing repos is controlled by .hg/requires
+    b'format',
+    b'exp-dirstate-v2',
+    default=False,
+    experimental=True,
+)
+coreconfigitem(
1306 b'format',
1314 b'format',
1307 b'dotencode',
1315 b'dotencode',
1308 default=True,
1316 default=True,
1309 )
1317 )
1310 coreconfigitem(
1318 coreconfigitem(
1311 b'format',
1319 b'format',
1312 b'generaldelta',
1320 b'generaldelta',
1313 default=False,
1321 default=False,
1314 experimental=True,
1322 experimental=True,
1315 )
1323 )
1316 coreconfigitem(
1324 coreconfigitem(
1317 b'format',
1325 b'format',
1318 b'manifestcachesize',
1326 b'manifestcachesize',
1319 default=None,
1327 default=None,
1320 experimental=True,
1328 experimental=True,
1321 )
1329 )
1322 coreconfigitem(
1330 coreconfigitem(
1323 b'format',
1331 b'format',
1324 b'maxchainlen',
1332 b'maxchainlen',
1325 default=dynamicdefault,
1333 default=dynamicdefault,
1326 experimental=True,
1334 experimental=True,
1327 )
1335 )
1328 coreconfigitem(
1336 coreconfigitem(
1329 b'format',
1337 b'format',
1330 b'obsstore-version',
1338 b'obsstore-version',
1331 default=None,
1339 default=None,
1332 )
1340 )
1333 coreconfigitem(
1341 coreconfigitem(
1334 b'format',
1342 b'format',
1335 b'sparse-revlog',
1343 b'sparse-revlog',
1336 default=True,
1344 default=True,
1337 )
1345 )
1338 coreconfigitem(
1346 coreconfigitem(
1339 b'format',
1347 b'format',
1340 b'revlog-compression',
1348 b'revlog-compression',
1341 default=lambda: [b'zstd', b'zlib'],
1349 default=lambda: [b'zstd', b'zlib'],
1342 alias=[(b'experimental', b'format.compression')],
1350 alias=[(b'experimental', b'format.compression')],
1343 )
1351 )
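# The alias above keeps the legacy spelling working: a user who still sets
# `experimental.format.compression` is honored when `format.revlog-compression`
# is looked up. A hypothetical hgrc fragment (value chosen for illustration):
#
#     [experimental]
#     format.compression = zlib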
# Experimental TODOs:
#
# * Same as for revlogv2 (but for the reduction of the number of files)
# * Improvements to investigate
#   - storing .hgtags fnode
#   - storing `rank` of changesets
#   - storing branch-related identifier

coreconfigitem(
    b'format',
    b'exp-use-changelog-v2',
    default=None,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'usefncache',
    default=True,
)
coreconfigitem(
    b'format',
    b'usegeneraldelta',
    default=True,
)
coreconfigitem(
    b'format',
    b'usestore',
    default=True,
)


def _persistent_nodemap_default():
    """compute `use-persistent-nodemap` default value

    The feature is disabled unless a fast implementation is available.
    """
    from . import policy

    return policy.importrust('revlog') is not None


coreconfigitem(
    b'format',
    b'use-persistent-nodemap',
    default=_persistent_nodemap_default,
)
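# Note that a callable default such as `_persistent_nodemap_default` above is
# stored as a function object at registration and resolved when the item is
# actually read, so the Rust-availability probe runs lazily rather than at
# module import time.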
coreconfigitem(
    b'format',
    b'exp-use-copies-side-data-changeset',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-share-safe',
    default=False,
)
coreconfigitem(
    b'format',
    b'internal-phase',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'fsmonitor',
    b'warn_when_unused',
    default=True,
)
coreconfigitem(
    b'fsmonitor',
    b'warn_update_file_count',
    default=50000,
)
coreconfigitem(
    b'fsmonitor',
    b'warn_update_file_count_rust',
    default=400000,
)
coreconfigitem(
    b'help',
    br'hidden-command\..*',
    default=False,
    generic=True,
)
coreconfigitem(
    b'help',
    br'hidden-topic\..*',
    default=False,
    generic=True,
)
coreconfigitem(
    b'hooks',
    b'[^:]*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hooks',
    b'.*:run-with-plain',
    default=True,
    generic=True,
)
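# Generic items such as the two `hooks` declarations above are matched by
# regular expression rather than by exact key name, so a (hypothetical) hook
# like `hooks.pretxncommit.lint` picks up the dynamic default, while
# `hooks.pretxncommit.lint:run-with-plain` defaults to True.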
coreconfigitem(
    b'hgweb-paths',
    b'.*',
    default=list,
    generic=True,
)
coreconfigitem(
    b'hostfingerprints',
    b'.*',
    default=list,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'ciphers',
    default=None,
)
coreconfigitem(
    b'hostsecurity',
    b'minimumprotocol',
    default=dynamicdefault,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:minimumprotocol$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:ciphers$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:fingerprints$',
    default=list,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:verifycertsfile$',
    default=None,
    generic=True,
)

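# A hypothetical hgrc fragment matched by the per-host generic items above
# (host name and fingerprint are placeholders):
#
#     [hostsecurity]
#     example.com:minimumprotocol = tls1.2
#     example.com:fingerprints = sha256:aa:bb:cc:...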
coreconfigitem(
    b'http_proxy',
    b'always',
    default=False,
)
coreconfigitem(
    b'http_proxy',
    b'host',
    default=None,
)
coreconfigitem(
    b'http_proxy',
    b'no',
    default=list,
)
coreconfigitem(
    b'http_proxy',
    b'passwd',
    default=None,
)
coreconfigitem(
    b'http_proxy',
    b'user',
    default=None,
)

coreconfigitem(
    b'http',
    b'timeout',
    default=None,
)

coreconfigitem(
    b'logtoprocess',
    b'commandexception',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'commandfinish',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'command',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'develwarn',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'uiblocked',
    default=None,
)
coreconfigitem(
    b'merge',
    b'checkunknown',
    default=b'abort',
)
coreconfigitem(
    b'merge',
    b'checkignored',
    default=b'abort',
)
coreconfigitem(
    b'experimental',
    b'merge.checkpathconflicts',
    default=False,
)
coreconfigitem(
    b'merge',
    b'followcopies',
    default=True,
)
coreconfigitem(
    b'merge',
    b'on-failure',
    default=b'continue',
)
coreconfigitem(
    b'merge',
    b'preferancestor',
    default=lambda: [b'*'],
    experimental=True,
)
coreconfigitem(
    b'merge',
    b'strict-capability-check',
    default=False,
)
coreconfigitem(
    b'merge-tools',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.args$',
    default=b"$local $base $other",
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.binary$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.check$',
    default=list,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.checkchanged$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.executable$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.fixeol$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.gui$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkers$',
    default=b'basic',
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkertemplate$',
    default=dynamicdefault,  # take from command-templates.mergemarker
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.priority$',
    default=0,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.premerge$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.symlink$',
    default=False,
    generic=True,
    priority=-1,
)
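# A hypothetical hgrc fragment exercising the generic merge-tools suffixes
# declared above (tool name, path, and arguments are placeholders):
#
#     [merge-tools]
#     mymeld.executable = /usr/bin/meld
#     mymeld.args = $local $base $other -o $output
#     mymeld.gui = True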
coreconfigitem(
    b'pager',
    b'attend-.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'pager',
    b'ignore',
    default=list,
)
coreconfigitem(
    b'pager',
    b'pager',
    default=dynamicdefault,
)
coreconfigitem(
    b'patch',
    b'eol',
    default=b'strict',
)
coreconfigitem(
    b'patch',
    b'fuzz',
    default=2,
)
coreconfigitem(
    b'paths',
    b'default',
    default=None,
)
coreconfigitem(
    b'paths',
    b'default-push',
    default=None,
)
coreconfigitem(
    b'paths',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'phases',
    b'checksubrepos',
    default=b'follow',
)
coreconfigitem(
    b'phases',
    b'new-commit',
    default=b'draft',
)
coreconfigitem(
    b'phases',
    b'publish',
    default=True,
)
coreconfigitem(
    b'profiling',
    b'enabled',
    default=False,
)
coreconfigitem(
    b'profiling',
    b'format',
    default=b'text',
)
coreconfigitem(
    b'profiling',
    b'freq',
    default=1000,
)
coreconfigitem(
    b'profiling',
    b'limit',
    default=30,
)
coreconfigitem(
    b'profiling',
    b'nested',
    default=0,
)
coreconfigitem(
    b'profiling',
    b'output',
    default=None,
)
coreconfigitem(
    b'profiling',
    b'showmax',
    default=0.999,
)
coreconfigitem(
    b'profiling',
    b'showmin',
    default=dynamicdefault,
)
coreconfigitem(
    b'profiling',
    b'showtime',
    default=True,
)
coreconfigitem(
    b'profiling',
    b'sort',
    default=b'inlinetime',
)
coreconfigitem(
    b'profiling',
    b'statformat',
    default=b'hotpath',
)
coreconfigitem(
    b'profiling',
    b'time-track',
    default=dynamicdefault,
)
coreconfigitem(
    b'profiling',
    b'type',
    default=b'stat',
)
coreconfigitem(
    b'progress',
    b'assume-tty',
    default=False,
)
coreconfigitem(
    b'progress',
    b'changedelay',
    default=1,
)
coreconfigitem(
    b'progress',
    b'clear-complete',
    default=True,
)
coreconfigitem(
    b'progress',
    b'debug',
    default=False,
)
coreconfigitem(
    b'progress',
    b'delay',
    default=3,
)
coreconfigitem(
    b'progress',
    b'disable',
    default=False,
)
coreconfigitem(
    b'progress',
    b'estimateinterval',
    default=60.0,
)
coreconfigitem(
    b'progress',
    b'format',
    default=lambda: [b'topic', b'bar', b'number', b'estimate'],
)
coreconfigitem(
    b'progress',
    b'refresh',
    default=0.1,
)
coreconfigitem(
    b'progress',
    b'width',
    default=dynamicdefault,
)
coreconfigitem(
    b'pull',
    b'confirm',
    default=False,
)
coreconfigitem(
    b'push',
    b'pushvars.server',
    default=False,
)
coreconfigitem(
    b'rewrite',
    b'backup-bundle',
    default=True,
    alias=[(b'ui', b'history-editing-backup')],
)
coreconfigitem(
    b'rewrite',
    b'update-timestamp',
    default=False,
)
coreconfigitem(
    b'rewrite',
    b'empty-successor',
    default=b'skip',
    experimental=True,
)
coreconfigitem(
    b'storage',
    b'new-repo-backend',
    default=b'revlogv1',
    experimental=True,
)
coreconfigitem(
    b'storage',
    b'revlog.optimize-delta-parent-choice',
    default=True,
    alias=[(b'format', b'aggressivemergedeltas')],
)
# experimental as long as rust is experimental (or a C version is implemented)
coreconfigitem(
    b'storage',
    b'revlog.persistent-nodemap.mmap',
    default=True,
)
# experimental as long as format.use-persistent-nodemap is.
coreconfigitem(
    b'storage',
    b'revlog.persistent-nodemap.slow-path',
    default=b"abort",
)

coreconfigitem(
    b'storage',
    b'revlog.reuse-external-delta',
    default=True,
)
coreconfigitem(
    b'storage',
    b'revlog.reuse-external-delta-parent',
    default=None,
)
coreconfigitem(
    b'storage',
    b'revlog.zlib.level',
    default=None,
)
coreconfigitem(
    b'storage',
    b'revlog.zstd.level',
    default=None,
)
coreconfigitem(
    b'server',
    b'bookmarks-pushkey-compat',
    default=True,
)
coreconfigitem(
    b'server',
    b'bundle1',
    default=True,
)
coreconfigitem(
    b'server',
    b'bundle1gd',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1.pull',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1gd.pull',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1.push',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1gd.push',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle2.stream',
    default=True,
    alias=[(b'experimental', b'bundle2.stream')],
)
coreconfigitem(
    b'server',
    b'compressionengines',
    default=list,
)
coreconfigitem(
    b'server',
    b'concurrent-push-mode',
    default=b'check-related',
)
coreconfigitem(
    b'server',
    b'disablefullbundle',
    default=False,
)
coreconfigitem(
    b'server',
    b'maxhttpheaderlen',
    default=1024,
)
coreconfigitem(
    b'server',
    b'pullbundle',
    default=False,
)
coreconfigitem(
    b'server',
    b'preferuncompressed',
    default=False,
)
coreconfigitem(
    b'server',
    b'streamunbundle',
    default=False,
)
coreconfigitem(
    b'server',
    b'uncompressed',
    default=True,
)
coreconfigitem(
    b'server',
    b'uncompressedallowsecret',
    default=False,
)
coreconfigitem(
    b'server',
    b'view',
    default=b'served',
)
coreconfigitem(
    b'server',
    b'validate',
    default=False,
)
coreconfigitem(
    b'server',
    b'zliblevel',
    default=-1,
)
coreconfigitem(
    b'server',
    b'zstdlevel',
    default=3,
)
coreconfigitem(
    b'share',
    b'pool',
    default=None,
)
coreconfigitem(
    b'share',
    b'poolnaming',
    default=b'identity',
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-not-safe',
    default=b'abort',
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-safe',
    default=b'abort',
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-not-safe.warn',
    default=True,
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-safe.warn',
    default=True,
)
coreconfigitem(
    b'shelve',
    b'maxbackups',
    default=10,
)
coreconfigitem(
    b'smtp',
    b'host',
    default=None,
)
coreconfigitem(
    b'smtp',
    b'local_hostname',
    default=None,
)
coreconfigitem(
    b'smtp',
    b'password',
    default=None,
)
coreconfigitem(
    b'smtp',
    b'port',
    default=dynamicdefault,
)
coreconfigitem(
    b'smtp',
    b'tls',
    default=b'none',
)
coreconfigitem(
    b'smtp',
    b'username',
    default=None,
)
coreconfigitem(
    b'sparse',
    b'missingwarning',
    default=True,
    experimental=True,
)
coreconfigitem(
    b'subrepos',
    b'allowed',
    default=dynamicdefault,  # to make backporting simpler
)
coreconfigitem(
    b'subrepos',
    b'hg:allowed',
    default=dynamicdefault,
)
coreconfigitem(
    b'subrepos',
    b'git:allowed',
    default=dynamicdefault,
)
coreconfigitem(
    b'subrepos',
    b'svn:allowed',
    default=dynamicdefault,
)
coreconfigitem(
    b'templates',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'templateconfig',
    b'.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'trusted',
    b'groups',
    default=list,
)
coreconfigitem(
    b'trusted',
    b'users',
    default=list,
)
coreconfigitem(
    b'ui',
    b'_usedassubrepo',
    default=False,
)
coreconfigitem(
    b'ui',
    b'allowemptycommit',
    default=False,
)
coreconfigitem(
    b'ui',
    b'archivemeta',
    default=True,
)
coreconfigitem(
    b'ui',
    b'askusername',
    default=False,
)
coreconfigitem(
    b'ui',
    b'available-memory',
    default=None,
)

coreconfigitem(
    b'ui',
    b'clonebundlefallback',
    default=False,
)
coreconfigitem(
    b'ui',
    b'clonebundleprefers',
    default=list,
)
coreconfigitem(
    b'ui',
    b'clonebundles',
    default=True,
)
coreconfigitem(
    b'ui',
    b'color',
    default=b'auto',
)
coreconfigitem(
    b'ui',
    b'commitsubrepos',
    default=False,
)
coreconfigitem(
    b'ui',
    b'debug',
    default=False,
)
coreconfigitem(
    b'ui',
    b'debugger',
    default=None,
)
coreconfigitem(
    b'ui',
    b'editor',
    default=dynamicdefault,
)
coreconfigitem(
    b'ui',
    b'detailed-exit-code',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'ui',
    b'fallbackencoding',
    default=None,
)
coreconfigitem(
    b'ui',
    b'forcecwd',
    default=None,
)
coreconfigitem(
    b'ui',
    b'forcemerge',
    default=None,
)
coreconfigitem(
    b'ui',
    b'formatdebug',
    default=False,
)
coreconfigitem(
    b'ui',
    b'formatjson',
    default=False,
)
coreconfigitem(
    b'ui',
    b'formatted',
    default=None,
)
coreconfigitem(
    b'ui',
    b'interactive',
    default=None,
)
coreconfigitem(
    b'ui',
    b'interface',
    default=None,
)
coreconfigitem(
    b'ui',
    b'interface.chunkselector',
    default=None,
)
coreconfigitem(
    b'ui',
    b'large-file-limit',
    default=10000000,
)
coreconfigitem(
    b'ui',
    b'logblockedtimes',
    default=False,
)
coreconfigitem(
    b'ui',
    b'merge',
    default=None,
)
coreconfigitem(
    b'ui',
    b'mergemarkers',
    default=b'basic',
)
coreconfigitem(
    b'ui',
    b'message-output',
    default=b'stdio',
)
coreconfigitem(
    b'ui',
    b'nontty',
    default=False,
)
coreconfigitem(
    b'ui',
    b'origbackuppath',
    default=None,
)
coreconfigitem(
    b'ui',
    b'paginate',
    default=True,
)
coreconfigitem(
    b'ui',
    b'patch',
    default=None,
)
coreconfigitem(
    b'ui',
    b'portablefilenames',
    default=b'warn',
)
coreconfigitem(
    b'ui',
    b'promptecho',
    default=False,
)
coreconfigitem(
    b'ui',
    b'quiet',
    default=False,
)
coreconfigitem(
    b'ui',
    b'quietbookmarkmove',
    default=False,
)
coreconfigitem(
    b'ui',
    b'relative-paths',
    default=b'legacy',
)
coreconfigitem(
    b'ui',
    b'remotecmd',
    default=b'hg',
)
coreconfigitem(
    b'ui',
    b'report_untrusted',
    default=True,
)
coreconfigitem(
    b'ui',
    b'rollback',
    default=True,
)
coreconfigitem(
    b'ui',
    b'signal-safe-lock',
    default=True,
)
coreconfigitem(
    b'ui',
    b'slash',
    default=False,
)
coreconfigitem(
    b'ui',
    b'ssh',
    default=b'ssh',
)
coreconfigitem(
    b'ui',
    b'ssherrorhint',
    default=None,
)
coreconfigitem(
    b'ui',
    b'statuscopies',
    default=False,
)
coreconfigitem(
    b'ui',
    b'strict',
    default=False,
)
coreconfigitem(
    b'ui',
    b'style',
    default=b'',
)
coreconfigitem(
    b'ui',
    b'supportcontact',
    default=None,
)
coreconfigitem(
    b'ui',
    b'textwidth',
    default=78,
)
coreconfigitem(
    b'ui',
    b'timeout',
    default=b'600',
)
coreconfigitem(
    b'ui',
    b'timeout.warn',
    default=0,
)
coreconfigitem(
    b'ui',
    b'timestamp-output',
    default=False,
)
coreconfigitem(
    b'ui',
    b'traceback',
    default=False,
)
coreconfigitem(
    b'ui',
    b'tweakdefaults',
    default=False,
)
coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
coreconfigitem(
    b'ui',
    b'verbose',
    default=False,
)
coreconfigitem(
    b'verify',
    b'skipflags',
    default=None,
)
coreconfigitem(
    b'web',
    b'allowbz2',
    default=False,
)
coreconfigitem(
    b'web',
    b'allowgz',
    default=False,
)
coreconfigitem(
    b'web',
    b'allow-pull',
    alias=[(b'web', b'allowpull')],
    default=True,
)
coreconfigitem(
    b'web',
    b'allow-push',
    alias=[(b'web', b'allow_push')],
    default=list,
)
coreconfigitem(
    b'web',
    b'allowzip',
    default=False,
)
coreconfigitem(
    b'web',
    b'archivesubrepos',
    default=False,
)
coreconfigitem(
    b'web',
    b'cache',
    default=True,
)
coreconfigitem(
    b'web',
    b'comparisoncontext',
    default=5,
)
coreconfigitem(
    b'web',
    b'contact',
    default=None,
)
coreconfigitem(
    b'web',
    b'deny_push',
    default=list,
)
coreconfigitem(
    b'web',
    b'guessmime',
    default=False,
)
coreconfigitem(
    b'web',
    b'hidden',
    default=False,
)
coreconfigitem(
    b'web',
    b'labels',
    default=list,
)
coreconfigitem(
    b'web',
    b'logoimg',
    default=b'hglogo.png',
)
coreconfigitem(
    b'web',
    b'logourl',
    default=b'https://mercurial-scm.org/',
)
coreconfigitem(
    b'web',
    b'accesslog',
    default=b'-',
)
coreconfigitem(
    b'web',
    b'address',
    default=b'',
)
coreconfigitem(
    b'web',
    b'allow-archive',
    alias=[(b'web', b'allow_archive')],
    default=list,
)
coreconfigitem(
    b'web',
    b'allow_read',
    default=list,
)
coreconfigitem(
    b'web',
    b'baseurl',
    default=None,
)
coreconfigitem(
    b'web',
    b'cacerts',
    default=None,
)
coreconfigitem(
    b'web',
    b'certificate',
    default=None,
)
coreconfigitem(
    b'web',
    b'collapse',
    default=False,
)
coreconfigitem(
    b'web',
    b'csp',
    default=None,
)
coreconfigitem(
    b'web',
    b'deny_read',
    default=list,
)
coreconfigitem(
    b'web',
    b'descend',
    default=True,
)
coreconfigitem(
    b'web',
    b'description',
    default=b"",
)
coreconfigitem(
    b'web',
    b'encoding',
    default=lambda: encoding.encoding,
)
coreconfigitem(
    b'web',
    b'errorlog',
    default=b'-',
)
coreconfigitem(
    b'web',
    b'ipv6',
    default=False,
)
coreconfigitem(
    b'web',
    b'maxchanges',
    default=10,
)
coreconfigitem(
    b'web',
    b'maxfiles',
    default=10,
)
coreconfigitem(
    b'web',
    b'maxshortchanges',
    default=60,
)
coreconfigitem(
    b'web',
    b'motd',
    default=b'',
)
coreconfigitem(
    b'web',
    b'name',
    default=dynamicdefault,
)
coreconfigitem(
    b'web',
    b'port',
    default=8000,
)
coreconfigitem(
    b'web',
    b'prefix',
    default=b'',
)
coreconfigitem(
    b'web',
    b'push_ssl',
    default=True,
)
coreconfigitem(
    b'web',
    b'refreshinterval',
    default=20,
)
coreconfigitem(
    b'web',
    b'server-header',
    default=None,
)
coreconfigitem(
    b'web',
    b'static',
    default=None,
)
coreconfigitem(
    b'web',
    b'staticurl',
    default=None,
)
coreconfigitem(
    b'web',
    b'stripes',
    default=1,
)
coreconfigitem(
    b'web',
    b'style',
    default=b'paper',
)
coreconfigitem(
    b'web',
    b'templates',
    default=None,
)
coreconfigitem(
    b'web',
    b'view',
    default=b'served',
    experimental=True,
)
coreconfigitem(
    b'worker',
    b'backgroundclose',
    default=dynamicdefault,
)
# Windows defaults to a limit of 512 open files. A buffer of 128
# should give us enough headway.
coreconfigitem(
    b'worker',
    b'backgroundclosemaxqueue',
    default=384,
)
coreconfigitem(
    b'worker',
    b'backgroundcloseminfilecount',
    default=2048,
)
coreconfigitem(
    b'worker',
    b'backgroundclosethreadcount',
    default=4,
)
coreconfigitem(
    b'worker',
    b'enabled',
    default=True,
)
coreconfigitem(
    b'worker',
    b'numcpus',
    default=None,
)

# Rebase-related configuration moved to core because other extensions are
# doing strange things. For example, shelve imports the rebase extension to
# reuse some bits without formally loading it.
coreconfigitem(
    b'commands',
    b'rebase.requiredest',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'rebaseskipobsolete',
    default=True,
)
coreconfigitem(
    b'rebase',
    b'singletransaction',
    default=False,
)
coreconfigitem(
    b'rebase',
    b'experimental.inmemory',
    default=False,
)
@@ -1,1952 +1,1954 @@
# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import contextlib
import errno
import os
import stat

from .i18n import _
from .pycompat import delattr

from hgdemandimport import tracing

from . import (
    encoding,
    error,
    match as matchmod,
    pathutil,
    policy,
    pycompat,
    scmutil,
    sparse,
    txnutil,
    util,
)

from .interfaces import (
    dirstate as intdirstate,
    util as interfaceutil,
)

parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

SUPPORTS_DIRSTATE_V2 = rustmod is not None

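# A hedged sketch of how a capability flag like this is typically consulted;
# the surrounding call site and the `requirements` set are assumptions for
# illustration, not code from this change:
#
#     if ui.configbool(b'format', b'exp-dirstate-v2'):
#         if not SUPPORTS_DIRSTATE_V2:
#             raise error.Abort(_(b'dirstate-v2 requires the Rust extensions'))
#         requirements.add(b'exp-dirstate-v2')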
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = 0x7FFFFFFF

dirstatetuple = parsers.dirstatetuple


class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        return obj._opener.join(fname)


class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        return obj._join(fname)


def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem'''
    tmpfd, tmpname = vfs.mkstemp()
    try:
        return os.fstat(tmpfd)[stat.ST_MTIME]
    finally:
        os.close(tmpfd)
        vfs.unlink(tmpname)


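# _getfsnow() above deliberately creates and stats a scratch file so that
# "now" reflects the clock and timestamp granularity of the filesystem
# backing the vfs (which can differ from time.time(), e.g. on network
# mounts), rather than the local clock.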
@interfaceutil.implementer(intdirstate.idirstate)
class dirstate(object):
    def __init__(
        self, opener, ui, root, validate, sparsematchfn, nodeconstants
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd

    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        self._pl

    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1

    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        return self._parentwriters > 0

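    # A hedged usage sketch for parentchange() above; setparents() is a real
    # dirstate method, but this exact call site is illustrative only:
    #
    #     with repo.dirstate.parentchange():
    #         repo.dirstate.setparents(newnode)
    #     # an exception inside the block leaves _parentwriters elevated, so
    #     # the incoherent dirstate is not written back when wlock is released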
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        self._map = self._mapcls(
            self._ui, self._opener, self._root, self._nodeconstants
        )
        return self._map

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()

159 @repocache(b'branch')
161 @repocache(b'branch')
160 def _branch(self):
162 def _branch(self):
161 try:
163 try:
162 return self._opener.read(b"branch").strip() or b"default"
164 return self._opener.read(b"branch").strip() or b"default"
163 except IOError as inst:
165 except IOError as inst:
164 if inst.errno != errno.ENOENT:
166 if inst.errno != errno.ENOENT:
165 raise
167 raise
166 return b"default"
168 return b"default"
167
169
168 @property
170 @property
169 def _pl(self):
171 def _pl(self):
170 return self._map.parents()
172 return self._map.parents()
171
173
172 def hasdir(self, d):
174 def hasdir(self, d):
173 return self._map.hastrackeddir(d)
175 return self._map.hastrackeddir(d)
174
176
175 @rootcache(b'.hgignore')
177 @rootcache(b'.hgignore')
176 def _ignore(self):
178 def _ignore(self):
177 files = self._ignorefiles()
179 files = self._ignorefiles()
178 if not files:
180 if not files:
179 return matchmod.never()
181 return matchmod.never()
180
182
181 pats = [b'include:%s' % f for f in files]
183 pats = [b'include:%s' % f for f in files]
182 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
184 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
183
185
184 @propertycache
186 @propertycache
185 def _slash(self):
187 def _slash(self):
186 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
188 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
187
189
188 @propertycache
190 @propertycache
189 def _checklink(self):
191 def _checklink(self):
190 return util.checklink(self._root)
192 return util.checklink(self._root)
191
193
192 @propertycache
194 @propertycache
193 def _checkexec(self):
195 def _checkexec(self):
194 return bool(util.checkexec(self._root))
196 return bool(util.checkexec(self._root))
195
197
196 @propertycache
198 @propertycache
197 def _checkcase(self):
199 def _checkcase(self):
198 return not util.fscasesensitive(self._join(b'.hg'))
200 return not util.fscasesensitive(self._join(b'.hg'))
199
201
200 def _join(self, f):
202 def _join(self, f):
201 # much faster than os.path.join()
203 # much faster than os.path.join()
202 # it's safe because f is always a relative path
204 # it's safe because f is always a relative path
203 return self._rootdir + f
205 return self._rootdir + f
204
206
205 def flagfunc(self, buildfallback):
207 def flagfunc(self, buildfallback):
206 if self._checklink and self._checkexec:
208 if self._checklink and self._checkexec:
207
209
208 def f(x):
210 def f(x):
209 try:
211 try:
210 st = os.lstat(self._join(x))
212 st = os.lstat(self._join(x))
211 if util.statislink(st):
213 if util.statislink(st):
212 return b'l'
214 return b'l'
213 if util.statisexec(st):
215 if util.statisexec(st):
214 return b'x'
216 return b'x'
215 except OSError:
217 except OSError:
216 pass
218 pass
217 return b''
219 return b''
218
220
219 return f
221 return f
220
222
221 fallback = buildfallback()
223 fallback = buildfallback()
222 if self._checklink:
224 if self._checklink:
223
225
224 def f(x):
226 def f(x):
225 if os.path.islink(self._join(x)):
227 if os.path.islink(self._join(x)):
226 return b'l'
228 return b'l'
227 if b'x' in fallback(x):
229 if b'x' in fallback(x):
228 return b'x'
230 return b'x'
229 return b''
231 return b''
230
232
231 return f
233 return f
232 if self._checkexec:
234 if self._checkexec:
233
235
234 def f(x):
236 def f(x):
235 if b'l' in fallback(x):
237 if b'l' in fallback(x):
236 return b'l'
238 return b'l'
237 if util.isexec(self._join(x)):
239 if util.isexec(self._join(x)):
238 return b'x'
240 return b'x'
239 return b''
241 return b''
240
242
241 return f
243 return f
242 else:
244 else:
243 return fallback
245 return fallback
244
246
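    # Usage sketch (illustrative; `manifestflags` is an assumed callable
    # mapping a path to its recorded flags): flagfunc() returns the cheapest
    # checker the filesystem supports, consulting the fallback only when
    # symlinks or exec bits cannot be observed on disk.
    #
    #     flags = repo.dirstate.flagfunc(lambda: manifestflags)
    #     if flags(b'bin/tool') == b'x':
    #         pass  # tracked as executable
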
    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()

    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path

    def __getitem__(self, key):
        """Return the current state of key (a filename) in the dirstate.

        States are:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition
        ?  not tracked
        """
        return self._map.get(key, (b"?",))[0]

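    # Usage sketch (illustrative): the mapping interface exposes the one-byte
    # state codes documented above.
    #
    #     state = repo.dirstate[b'path/to/file']
    #     if state == b'?':
    #         pass  # file is not tracked
    #     elif state == b'a':
    #         pass  # file is marked for addition
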
    def __contains__(self, key):
        return key in self._map

    def __iter__(self):
        return iter(sorted(self._map))

    def items(self):
        return pycompat.iteritems(self._map)

    iteritems = items

    def parents(self):
        return [self._validate(p) for p in self._pl]

    def p1(self):
        return self._validate(self._pl[0])

    def p2(self):
        return self._validate(self._pl[1])

    def branch(self):
        return encoding.tolocal(self._branch)

    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries are
        adjusted to normal, and previous copy records are discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard 'm' markers when moving away from a merge state
                if s[0] == b'm':
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s[0] == b'n' and s[2] == -2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.add(f)
        return copies

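    # Illustrative note (assumed names): when collapsing from two parents to
    # one, the mapping returned by setparents() can be used to restore the
    # copy records that were attached to the discarded merge entries.
    #
    #     with repo.dirstate.parentchange():
    #         copies = repo.dirstate.setparents(p1)
    #     for dst, src in pycompat.iteritems(copies):
    #         repo.dirstate.copy(src, dst)
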
    def setbranch(self, branch):
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            f.discard()
            raise

    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None

    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._map.copymap[dest] = source
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif self._map.copymap.pop(dest, None):
            self._updatedfiles.add(dest)

    def copied(self, file):
        return self._map.copymap.get(file, None)

    def copies(self):
        return self._map.copymap

    def _addpath(self, f, state, mode, size, mtime):
        oldstate = self[f]
        if state == b'a' or oldstate == b'r':
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                raise error.Abort(
                    _(b'directory %r already in dirstate') % pycompat.bytestr(f)
                )
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and entry[0] != b'r':
                    raise error.Abort(
                        _(b'file %r in dirstate clashes with %r')
                        % (pycompat.bytestr(d), pycompat.bytestr(f))
                    )
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(f, oldstate, state, mode, size, mtime)

    def normal(self, f, parentfiledata=None):
        """Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode and
        size), at or as close as possible to the point where we
        determined the file was clean, to limit the risk of the
        file having been changed by an external process between that
        moment and now."""
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            s = os.lstat(self._join(f))
            mode = s.st_mode
            size = s.st_size
            mtime = s[stat.ST_MTIME]
        self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask)
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime

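    # Usage sketch (illustrative): a caller that has just stat'ed the file
    # can pass the data along and avoid the extra lstat() performed above.
    #
    #     s = os.lstat(repo.wjoin(f))
    #     repo.dirstate.normal(
    #         f, parentfiledata=(s.st_mode, s.st_size, s[stat.ST_MTIME])
    #     )
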
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != self._nodeconstants.nullid:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                if entry[0] == b'r' and entry[2] in (-1, -2):
                    source = self._map.copymap.get(f)
                    if entry[2] == -1:
                        self.merge(f)
                    elif entry[2] == -2:
                        self.otherparent(f)
                    if source:
                        self.copy(source, f)
                    return
                if entry[0] == b'm' or entry[0] == b'n' and entry[2] == -2:
                    return
        self._addpath(f, b'n', 0, -1, -1)
        self._map.copymap.pop(f, None)

    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if self._pl[1] == self._nodeconstants.nullid:
            raise error.Abort(
                _(b"setting %r to other parent only allowed in merges") % f
            )
        if f in self and self[f] == b'n':
            # merge-like
            self._addpath(f, b'm', 0, -2, -1)
        else:
            # add-like
            self._addpath(f, b'n', 0, -2, -1)
        self._map.copymap.pop(f, None)

    def add(self, f):
        '''Mark a file added.'''
        self._addpath(f, b'a', 0, -1, -1)
        self._map.copymap.pop(f, None)

    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        oldstate = self[f]
        size = 0
        if self._pl[1] != self._nodeconstants.nullid:
            entry = self._map.get(f)
            if entry is not None:
                # backup the previous state
                if entry[0] == b'm':  # merge
                    size = -1
                elif entry[0] == b'n' and entry[2] == -2:  # other parent
                    size = -2
                    self._map.otherparentset.add(f)
        self._updatedfiles.add(f)
        self._map.removefile(f, oldstate, size)
        if size == 0:
            self._map.copymap.pop(f, None)

    def merge(self, f):
        '''Mark a file merged.'''
        if self._pl[1] == self._nodeconstants.nullid:
            return self.normallookup(f)
        return self.otherparent(f)

    def drop(self, f):
        '''Drop a file from the dirstate'''
        oldstate = self[f]
        if self._map.dropfile(f, oldstate):
            self._dirty = True
            self._updatedfiles.add(f)
            self._map.copymap.pop(f, None)

    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded

    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.filefoldmap
                )
        return folded

    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded

    def normalize(self, path, isknown=False, ignoremissing=False):
        """
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing paths are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        """

        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path

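    # Usage sketch (illustrative): on a case-insensitive filesystem, a
    # user-supplied spelling is folded to the case already recorded in the
    # dirstate or on disk; on a case-sensitive filesystem the path is
    # returned unchanged.
    #
    #     nf = repo.dirstate.normalize(b'README.TXT')
    #     # -> b'readme.txt' if that spelling is what is tracked
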
    def clear(self):
        self._map.clear()
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True

    def rebuild(self, parent, allfiles, changedfiles=None):
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self.normallookup(f)
        for f in to_drop:
            self.drop(f)

        self._dirty = True

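    # Usage sketch (illustrative; this roughly mirrors how
    # 'hg debugrebuilddirstate' drives the method, with `rev` assumed):
    # resetting the dirstate to match a revision's manifest.
    #
    #     ctx = repo[rev]
    #     repo.dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles=None)
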
    def identity(self):
        """Return the identity of the dirstate itself, to detect when its
        storage has changed.

        If the identity of the previous dirstate is equal to this one,
        writing changes based on the former dirstate out can keep
        consistency.
        """
        return self._map.identity

    def write(self, tr):
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamps;
            # delaying the write could reintroduce the "ambiguous
            # timestamp" issue. See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )
            return

        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(st)

    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback

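    # Usage sketch (illustrative; the category and helper names are assumed):
    #
    #     def _onparents(dirstate, oldparents, newparents):
    #         ui.debug(b'parents moved from %r to %r\n'
    #                  % (oldparents, newparents))
    #
    #     repo.dirstate.addparentchangecallback(b'myext', _onparents)
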
    def _writedirstate(self, st):
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # the timestamp of each entry in the dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e[0] == b'n' and e[3] == now:
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False

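    # Worked example for the delaywrite arithmetic above (illustrative): with
    # delaywrite=2 and clock=17.3, start = 17 - (17 % 2) = 16 and end = 18,
    # so we sleep 0.7s and then treat 18 as 'now', ensuring every mtime
    # recorded as 'now' is strictly in the past once the write completes.
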
    def _dirignore(self, f):
        if self._ignore(f):
            return True
        for p in pathutil.finddirs(f):
            if self._ignore(p):
                return True
        return False

    def _ignorefiles(self):
        files = []
        if os.path.exists(self._join(b'.hgignore')):
            files.append(self._join(b'.hgignore'))
        for name, path in self._ui.configitems(b"ui"):
            if name == b'ignore' or name.startswith(b'ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files

    def _ignorefileandline(self, f):
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")

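    # Usage sketch (illustrative): this helper backs 'hg debugignore FILE',
    # reporting which ignore file and line matched a given path.
    #
    #     ignorefile, lineno, line = repo.dirstate._ignorefileandline(
    #         b'build/out.o'
    #     )
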
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound

    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results

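    # Usage sketch (illustrative): collecting stat data for every tracked and
    # unknown file under the root.
    #
    #     m = matchmod.always()
    #     results = repo.dirstate.walk(
    #         m, subrepos=[], unknown=True, ignored=False
    #     )
    #     for fn, st in sorted(results.items()):
    #         pass  # st is stat-like, or None for dirstate-only entries
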
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)

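    # Editorial sketch (not part of the original source): given the
    # RAYON_NUM_THREADS override above, the following hgrc settings are
    # assumed to cap the Rust status threads until Rayon can read the
    # config directly:
    #
    #   [worker]
    #   numcpus = 4     # exported as RAYON_NUM_THREADS=4
    #   enabled = no    # forces RAYON_NUM_THREADS=1 (single-threaded)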
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            if not st and state in b"nma":
                dadd(fn)
            elif state == b'n':
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or size == -2  # other parent
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == b'm':
                madd(fn)
            elif state == b'a':
                aadd(fn)
            elif state == b'r':
                radd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)

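    # Editorial sketch (not part of the original source): size and mtime
    # are stored in 31 bits in the dirstate, so the comparisons above also
    # check the stat value masked with _rangemask (0x7FFFFFFF). E.g.:
    #
    #   >>> (2 ** 31 + 5) & 0x7FFFFFFF   # a >2 GiB file, truncated
    #   5
    #
    # so a stored size of 5 still matches an on-disk st_size of 2**31 + 5.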
    def matches(self, match):
        """
        return files in the dirstate (in whatever state) filtered by match
        """
        dmap = self._map
        if rustmod is not None:
            dmap = self._map._rustmap

        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files is
            # much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just return
            # that
            return list(files)
        return [f for f in dmap if match(f)]

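    # Editorial note (not part of the original source): the exact-matcher
    # fast path does O(1) membership tests against dmap for each pattern,
    # i.e. O(len(files)) work, instead of scanning the whole map, which is
    # O(len(dmap)) and usually far larger. Illustratively:
    #
    #   >>> files = [b'a', b'b']                      # hypothetical patterns
    #   >>> [f for f in files if f in {b'a': 1, b'c': 2}]
    #   [b'a']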
    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                self._opener(filename, b"w", atomictemp=True, checkambig=True)
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )

    def restorebackup(self, tr, backupname):
        '''Restore dirstate by backup file'''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)

    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        self._opener.unlink(backupname)


class dirstatemap(object):
    """Map encapsulating the dirstate's contents.

    The dirstate contains the following state:

    - `identity` is the identity of the dirstate file, which can be used to
      detect when changes have occurred to the dirstate file.

    - `parents` is a pair containing the parents of the working copy. The
      parents are updated by calling `setparents`.

    - the state map maps filenames to tuples of (state, mode, size, mtime),
      where state is a single character representing 'normal', 'added',
      'removed', or 'merged'. It is read by treating the dirstate as a
      dict. File state is updated by calling the `addfile`, `removefile` and
      `dropfile` methods.

    - `copymap` maps destination filenames to their source filename.

    The dirstate also provides the following views onto the state:

    - `nonnormalset` is a set of the filenames that have state other
      than 'normal', or are normal but have an mtime of -1 ('normallookup').

    - `otherparentset` is a set of the filenames that are marked as coming
      from the second parent when the dirstate is currently being merged.

    - `filefoldmap` is a dict mapping normalized filenames to the denormalized
      form that they appear as in the dirstate.

    - `dirfoldmap` is a dict mapping normalized directory names to the
      denormalized form that they appear as in the dirstate.
    """

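    # Editorial sketch (not part of the original source): per the docstring
    # above, a state map entry is a (state, mode, size, mtime) tuple; the
    # values below are hypothetical:
    #
    #   >>> dmap = {b'foo.txt': (b'n', 0o644, 1024, 1630000000)}
    #   >>> dmap[b'foo.txt'][0]  # b'n' == 'normal' (tracked and clean)
    #   b'n'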
    def __init__(self, ui, opener, root, nodeconstants):
        self._ui = ui
        self._opener = opener
        self._root = root
        self._filename = b'dirstate'
        self._nodelen = 20
        self._nodeconstants = nodeconstants

        self._parents = None
        self._dirtyparents = False

        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None

    @propertycache
    def _map(self):
        self._map = {}
        self.read()
        return self._map

    @propertycache
    def copymap(self):
        self.copymap = {}
        self._map
        return self.copymap

    def clear(self):
        self._map.clear()
        self.copymap.clear()
        self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
        util.clearcachedproperty(self, b"_dirs")
        util.clearcachedproperty(self, b"_alldirs")
        util.clearcachedproperty(self, b"filefoldmap")
        util.clearcachedproperty(self, b"dirfoldmap")
        util.clearcachedproperty(self, b"nonnormalset")
        util.clearcachedproperty(self, b"otherparentset")

    def items(self):
        return pycompat.iteritems(self._map)

    # forward for python2,3 compat
    iteritems = items

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        return iter(self._map)

    def get(self, key, default=None):
        return self._map.get(key, default)

    def __contains__(self, key):
        return key in self._map

    def __getitem__(self, key):
        return self._map[key]

    def keys(self):
        return self._map.keys()

    def preload(self):
        """Loads the underlying data, if it's not already loaded"""
        self._map

    def addfile(self, f, oldstate, state, mode, size, mtime):
        """Add a tracked file to the dirstate."""
        if oldstate in b"?r" and "_dirs" in self.__dict__:
            self._dirs.addpath(f)
        if oldstate == b"?" and "_alldirs" in self.__dict__:
            self._alldirs.addpath(f)
        self._map[f] = dirstatetuple(state, mode, size, mtime)
        if state != b'n' or mtime == -1:
            self.nonnormalset.add(f)
        if size == -2:
            self.otherparentset.add(f)

    def removefile(self, f, oldstate, size):
        """
        Mark a file as removed in the dirstate.

        The `size` parameter is used to store sentinel values that indicate
        the file's previous state. In the future, we should refactor this
        to be more explicit about what that state is.
        """
        if oldstate not in b"?r" and "_dirs" in self.__dict__:
            self._dirs.delpath(f)
        if oldstate == b"?" and "_alldirs" in self.__dict__:
            self._alldirs.addpath(f)
        if "filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            self.filefoldmap.pop(normed, None)
        self._map[f] = dirstatetuple(b'r', 0, size, 0)
        self.nonnormalset.add(f)

    def dropfile(self, f, oldstate):
        """
        Remove a file from the dirstate. Returns True if the file was
        previously recorded.
        """
        exists = self._map.pop(f, None) is not None
        if exists:
            if oldstate != b"r" and "_dirs" in self.__dict__:
                self._dirs.delpath(f)
            if "_alldirs" in self.__dict__:
                self._alldirs.delpath(f)
        if "filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            self.filefoldmap.pop(normed, None)
        self.nonnormalset.discard(f)
        return exists

    def clearambiguoustimes(self, files, now):
        for f in files:
            e = self.get(f)
            if e is not None and e[0] == b'n' and e[3] == now:
                self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
                self.nonnormalset.add(f)

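    # Editorial note (not part of the original source): clearambiguoustimes
    # handles the race documented in dirstate.status() above. An entry whose
    # recorded mtime equals the dirstate write time `now` could still be
    # modified within that same second without its size changing, so its
    # mtime is reset to -1, forcing the next status to re-read the file.
    # With a hypothetical entry:
    #
    #   >>> e = (b'n', 0o644, 12, 1630000000)
    #   >>> (e[0], e[1], e[2], -1)   # after the reset
    #   (b'n', 420, 12, -1)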
    def nonnormalentries(self):
        '''Compute the nonnormal dirstate entries from the dmap'''
        try:
            return parsers.nonnormalotherparententries(self._map)
        except AttributeError:
            nonnorm = set()
            otherparent = set()
            for fname, e in pycompat.iteritems(self._map):
                if e[0] != b'n' or e[3] == -1:
                    nonnorm.add(fname)
                if e[0] == b'n' and e[2] == -2:
                    otherparent.add(fname)
            return nonnorm, otherparent

    @propertycache
    def filefoldmap(self):
        """Returns a dictionary mapping normalized case paths to their
        non-normalized versions.
        """
        try:
            makefilefoldmap = parsers.make_file_foldmap
        except AttributeError:
            pass
        else:
            return makefilefoldmap(
                self._map, util.normcasespec, util.normcasefallback
            )

        f = {}
        normcase = util.normcase
        for name, s in pycompat.iteritems(self._map):
            if s[0] != b'r':
                f[normcase(name)] = name
        f[b'.'] = b'.'  # prevents useless util.fspath() invocation
        return f

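    # Editorial sketch (not part of the original source): the fold map lets
    # a lookup on a case-insensitive filesystem recover the spelling stored
    # in the dirstate. For instance, on a platform whose util.normcase
    # lower-cases paths (the exact folding is platform-specific):
    #
    #   >>> filefoldmap = {b'foo.txt': b'Foo.TXT'}
    #   >>> filefoldmap[b'foo.txt']
    #   b'Foo.TXT'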
    def hastrackeddir(self, d):
        """
        Returns True if the dirstate contains a tracked (not removed) file
        in this directory.
        """
        return d in self._dirs

    def hasdir(self, d):
        """
        Returns True if the dirstate contains a file (tracked or removed)
        in this directory.
        """
        return d in self._alldirs

    @propertycache
    def _dirs(self):
        return pathutil.dirs(self._map, b'r')

    @propertycache
    def _alldirs(self):
        return pathutil.dirs(self._map)

    def _opendirstatefile(self):
        fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
        if self._pendingmode is not None and self._pendingmode != mode:
            fp.close()
            raise error.Abort(
                _(b'working directory state may be changed in parallel')
            )
        self._pendingmode = mode
        return fp

    def parents(self):
        if not self._parents:
            try:
                fp = self._opendirstatefile()
                st = fp.read(2 * self._nodelen)
                fp.close()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                # File doesn't exist, so the current state is empty
                st = b''

            l = len(st)
            if l == self._nodelen * 2:
                self._parents = (
                    st[: self._nodelen],
                    st[self._nodelen : 2 * self._nodelen],
                )
            elif l == 0:
                self._parents = (
                    self._nodeconstants.nullid,
                    self._nodeconstants.nullid,
                )
            else:
                raise error.Abort(
                    _(b'working directory state appears damaged!')
                )

        return self._parents

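    # Editorial note (not part of the original source): the dirstate file
    # starts with the two working-copy parents as raw binary nodes, each
    # self._nodelen (20) bytes, so the first 40 bytes decompose as:
    #
    #   >>> st = b'\x11' * 20 + b'\x22' * 20   # hypothetical header
    #   >>> (st[:20], st[20:40]) == (b'\x11' * 20, b'\x22' * 20)
    #   True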
    def setparents(self, p1, p2):
        self._parents = (p1, p2)
        self._dirtyparents = True

    def read(self):
        # ignore HG_PENDING because identity is used only for writing
        self.identity = util.filestat.frompath(
            self._opener.join(self._filename)
        )

        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return

        if util.safehasattr(parsers, b'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. This trades wasting some memory for avoiding costly
            # resizes. Each entry has a prefix of 17 bytes followed by one or
            # two path names. Studies on various large-scale real-world
            # repositories found 54 bytes a reasonable upper limit for the
            # average path names. Copy entries are ignored for the sake of
            # this estimate.
            self._map = parsers.dict_new_presized(len(st) // 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map, self.copymap, st)
        if not self._dirtyparents:
            self.setparents(*p)

        # Avoid excess attribute lookups by fast pathing certain checks
        self.__contains__ = self._map.__contains__
        self.__getitem__ = self._map.__getitem__
        self.get = self._map.get

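    # Editorial sketch (not part of the original source): the presizing
    # divisor of 71 is the 17-byte fixed entry prefix plus the assumed
    # 54-byte average path length mentioned in the comment above, e.g.:
    #
    #   >>> 17 + 54
    #   71
    #   >>> 7100000 // 71   # a ~7.1 MB dirstate presizes for ~100k files
    #   100000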
    def write(self, st, now):
        st.write(
            parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
        )
        st.close()
        self._dirtyparents = False
        self.nonnormalset, self.otherparentset = self.nonnormalentries()

    @propertycache
    def nonnormalset(self):
        nonnorm, otherparents = self.nonnormalentries()
        self.otherparentset = otherparents
        return nonnorm

    @propertycache
    def otherparentset(self):
        nonnorm, otherparents = self.nonnormalentries()
        self.nonnormalset = nonnorm
        return otherparents

    def non_normal_or_other_parent_paths(self):
        return self.nonnormalset.union(self.otherparentset)

    @propertycache
    def identity(self):
        self._map
        return self.identity

    @propertycache
    def dirfoldmap(self):
        f = {}
        normcase = util.normcase
        for name in self._dirs:
            f[normcase(name)] = name
        return f


if rustmod is not None:

    class dirstatemap(object):
        def __init__(self, ui, opener, root, nodeconstants):
            self._nodeconstants = nodeconstants
            self._ui = ui
            self._opener = opener
            self._root = root
            self._filename = b'dirstate'
            self._nodelen = 20
            self._parents = None
            self._dirtyparents = False

            # for consistent view between _pl() and _read() invocations
            self._pendingmode = None

        def addfile(self, *args, **kwargs):
            return self._rustmap.addfile(*args, **kwargs)

        def removefile(self, *args, **kwargs):
            return self._rustmap.removefile(*args, **kwargs)

        def dropfile(self, *args, **kwargs):
            return self._rustmap.dropfile(*args, **kwargs)

        def clearambiguoustimes(self, *args, **kwargs):
            return self._rustmap.clearambiguoustimes(*args, **kwargs)

        def nonnormalentries(self):
            return self._rustmap.nonnormalentries()

        def get(self, *args, **kwargs):
            return self._rustmap.get(*args, **kwargs)

        @property
        def copymap(self):
            return self._rustmap.copymap()

        def preload(self):
            self._rustmap

        def clear(self):
            self._rustmap.clear()
            self.setparents(
                self._nodeconstants.nullid, self._nodeconstants.nullid
            )
            util.clearcachedproperty(self, b"_dirs")
            util.clearcachedproperty(self, b"_alldirs")
            util.clearcachedproperty(self, b"dirfoldmap")

        def items(self):
            return self._rustmap.items()

        def keys(self):
            return iter(self._rustmap)

        def __contains__(self, key):
            return key in self._rustmap

        def __getitem__(self, item):
            return self._rustmap[item]

        def __len__(self):
            return len(self._rustmap)

        def __iter__(self):
            return iter(self._rustmap)

        # forward for python2,3 compat
        iteritems = items

        def _opendirstatefile(self):
            fp, mode = txnutil.trypending(
                self._root, self._opener, self._filename
            )
            if self._pendingmode is not None and self._pendingmode != mode:
                fp.close()
                raise error.Abort(
                    _(b'working directory state may be changed in parallel')
                )
            self._pendingmode = mode
            return fp

        def setparents(self, p1, p2):
            self._parents = (p1, p2)
            self._dirtyparents = True

        def parents(self):
            if not self._parents:
                try:
                    fp = self._opendirstatefile()
                    st = fp.read(40)
                    fp.close()
                except IOError as err:
                    if err.errno != errno.ENOENT:
                        raise
                    # File doesn't exist, so the current state is empty
                    st = b''

                l = len(st)
                if l == self._nodelen * 2:
                    self._parents = (
                        st[: self._nodelen],
                        st[self._nodelen : 2 * self._nodelen],
                    )
                elif l == 0:
                    self._parents = (
                        self._nodeconstants.nullid,
                        self._nodeconstants.nullid,
                    )
                else:
                    raise error.Abort(
                        _(b'working directory state appears damaged!')
                    )

            return self._parents

        @propertycache
        def _rustmap(self):
            """
            Fills the Dirstatemap when called.
            """
            # ignore HG_PENDING because identity is used only for writing
            self.identity = util.filestat.frompath(
                self._opener.join(self._filename)
            )

            try:
                fp = self._opendirstatefile()
                try:
                    st = fp.read()
                finally:
                    fp.close()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                st = b''

            use_dirstate_tree = self._ui.configbool(
                b"experimental",
                b"dirstate-tree.in-memory",
                False,
            )
            self._rustmap, parents = rustmod.DirstateMap.new(
                use_dirstate_tree, st
            )

            if parents and not self._dirtyparents:
                self.setparents(*parents)

            self.__contains__ = self._rustmap.__contains__
            self.__getitem__ = self._rustmap.__getitem__
            self.get = self._rustmap.get
            return self._rustmap

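        # Editorial sketch (not part of the original source): given the
        # configbool read above, the experimental tree-shaped in-memory
        # dirstate is assumed to be opt-in via hgrc, e.g.:
        #
        #   [experimental]
        #   dirstate-tree.in-memory = yes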
        def write(self, st, now):
            parents = self.parents()
            st.write(self._rustmap.write(parents[0], parents[1], now))
            st.close()
            self._dirtyparents = False

        @propertycache
        def filefoldmap(self):
            """Returns a dictionary mapping normalized case paths to their
            non-normalized versions.
            """
            return self._rustmap.filefoldmapasdict()

        def hastrackeddir(self, d):
            self._dirs  # Trigger Python's propertycache
            return self._rustmap.hastrackeddir(d)

        def hasdir(self, d):
            self._dirs  # Trigger Python's propertycache
            return self._rustmap.hasdir(d)

        @propertycache
        def _dirs(self):
            return self._rustmap.getdirs()

        @propertycache
        def _alldirs(self):
            return self._rustmap.getalldirs()

        @propertycache
        def identity(self):
            self._rustmap
            return self.identity

        @property
        def nonnormalset(self):
            nonnorm = self._rustmap.non_normal_entries()
            return nonnorm

        @propertycache
        def otherparentset(self):
            otherparents = self._rustmap.other_parent_entries()
            return otherparents

        def non_normal_or_other_parent_paths(self):
            return self._rustmap.non_normal_or_other_parent_paths()

        @propertycache
        def dirfoldmap(self):
            f = {}
            normcase = util.normcase
            for name in self._dirs:
                f[normcase(name)] = name
            return f
@@ -1,3780 +1,3795 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import functools
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
    wireprototypes,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    concurrency_checker as revlogchecker,
    constants as revlogconst,
    sidedata as sidedatamod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


105 class _basefilecache(scmutil.filecache):
105 class _basefilecache(scmutil.filecache):
106 """All filecache usage on repo are done for logic that should be unfiltered"""
106 """All filecache usage on repo are done for logic that should be unfiltered"""
107
107
108 def __get__(self, repo, type=None):
108 def __get__(self, repo, type=None):
109 if repo is None:
109 if repo is None:
110 return self
110 return self
111 # proxy to unfiltered __dict__ since filtered repo has no entry
111 # proxy to unfiltered __dict__ since filtered repo has no entry
112 unfi = repo.unfiltered()
112 unfi = repo.unfiltered()
113 try:
113 try:
114 return unfi.__dict__[self.sname]
114 return unfi.__dict__[self.sname]
115 except KeyError:
115 except KeyError:
116 pass
116 pass
117 return super(_basefilecache, self).__get__(unfi, type)
117 return super(_basefilecache, self).__get__(unfi, type)
118
118
119 def set(self, repo, value):
119 def set(self, repo, value):
120 return super(_basefilecache, self).set(repo.unfiltered(), value)
120 return super(_basefilecache, self).set(repo.unfiltered(), value)
121
121
122
122
123 class repofilecache(_basefilecache):
123 class repofilecache(_basefilecache):
124 """filecache for files in .hg but outside of .hg/store"""
124 """filecache for files in .hg but outside of .hg/store"""
125
125
126 def __init__(self, *paths):
126 def __init__(self, *paths):
127 super(repofilecache, self).__init__(*paths)
127 super(repofilecache, self).__init__(*paths)
128 for path in paths:
128 for path in paths:
129 _cachedfiles.add((path, b'plain'))
129 _cachedfiles.add((path, b'plain'))
130
130
131 def join(self, obj, fname):
131 def join(self, obj, fname):
132 return obj.vfs.join(fname)
132 return obj.vfs.join(fname)
133
133
134
134
135 class storecache(_basefilecache):
135 class storecache(_basefilecache):
136 """filecache for files in the store"""
136 """filecache for files in the store"""
137
137
138 def __init__(self, *paths):
138 def __init__(self, *paths):
139 super(storecache, self).__init__(*paths)
139 super(storecache, self).__init__(*paths)
140 for path in paths:
140 for path in paths:
141 _cachedfiles.add((path, b''))
141 _cachedfiles.add((path, b''))
142
142
143 def join(self, obj, fname):
143 def join(self, obj, fname):
144 return obj.sjoin(fname)
144 return obj.sjoin(fname)
145
145
146
146
147 class mixedrepostorecache(_basefilecache):
147 class mixedrepostorecache(_basefilecache):
148 """filecache for a mix files in .hg/store and outside"""
148 """filecache for a mix files in .hg/store and outside"""
149
149
150 def __init__(self, *pathsandlocations):
150 def __init__(self, *pathsandlocations):
151 # scmutil.filecache only uses the path for passing back into our
151 # scmutil.filecache only uses the path for passing back into our
152 # join(), so we can safely pass a list of paths and locations
152 # join(), so we can safely pass a list of paths and locations
153 super(mixedrepostorecache, self).__init__(*pathsandlocations)
153 super(mixedrepostorecache, self).__init__(*pathsandlocations)
154 _cachedfiles.update(pathsandlocations)
154 _cachedfiles.update(pathsandlocations)
155
155
156 def join(self, obj, fnameandlocation):
156 def join(self, obj, fnameandlocation):
157 fname, location = fnameandlocation
157 fname, location = fnameandlocation
158 if location == b'plain':
158 if location == b'plain':
159 return obj.vfs.join(fname)
159 return obj.vfs.join(fname)
160 else:
160 else:
161 if location != b'':
161 if location != b'':
162 raise error.ProgrammingError(
162 raise error.ProgrammingError(
163 b'unexpected location: %s' % location
163 b'unexpected location: %s' % location
164 )
164 )
165 return obj.sjoin(fname)
165 return obj.sjoin(fname)
166
166
167
167
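# Illustrative sketch (editor's addition, not part of this changeset): how
# the decorators above are typically applied. A decorated property is
# cached on the unfiltered repo and recomputed only when the stat info of
# the named file changes. The class and attribute names are hypothetical.
class _examplecaches(object):
    @repofilecache(b'bookmarks')
    def _bookmarkdata(self):
        # invalidated when .hg/bookmarks changes on disk
        return self.vfs.tryread(b'bookmarks')

    @storecache(b'00changelog.i')
    def _changelogdata(self):
        # invalidated when .hg/store/00changelog.i changes on disk
        return self.svfs.tryread(b'00changelog.i')
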
def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper

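# Illustrative sketch (editor's addition): ``unfilteredmethod`` in use. No
# matter which filtered view the method is called on, the body sees the
# unfiltered repository. The class and method names below are hypothetical.
class _exampleunfiltered(object):
    @unfilteredmethod
    def _rebuildcaches(self):
        # ``self`` is guaranteed to be the unfiltered repo here
        self.invalidatecaches()
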
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

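# Illustrative sketch (editor's addition): driving a peer through the
# executor above. For a local peer the future is already resolved when
# callcommand() returns; ``peer`` is assumed to be a localpeer instance.
def _examplelookup(peer, key):
    with peer.commandexecutor() as e:
        f = e.callcommand(b'lookup', {b'key': key})
        e.sendcommands()
        return f.result()
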
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui

        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

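# Illustrative sketch (editor's addition): how an extension would register
# a feature setup function. Only functions whose defining module belongs
# to a loaded extension are invoked, so this example function would be
# ignored here; ``exp-example-feature`` is a hypothetical requirement.
def _examplefeaturesetup(ui, supported):
    supported.add(b'exp-example-feature')


featuresetupfuncs.add(_examplefeaturesetup)
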
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is a vfs pointing at .hg/ of the current repo (the shared one)
    requirements is a set of requirements of the current repo (the shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the requires file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress ENOENT if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(vfs.read(b'requires').splitlines())
    except IOError as e:
        if not (allowmissing and e.errno == errno.ENOENT):
            raise
        requirements = set()
    return requirements

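# Illustrative sketch (editor's addition): a ``requires`` file is plain
# newline-delimited feature names, e.g. the bytes
# b"dotencode\nfncache\nrevlogv1\nstore\n", which _readrequires() would
# parse into:
_example_requirements = {b'dotencode', b'fncache', b'revlogv1', b'store'}
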
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
    # requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
    # if store is not present; refer to checkrequirementscompat() for that
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
    hint = _(b"see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:

        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If the `shared` or
    # `relshared` requirements are present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if (
        requirementsmod.REVLOGV2_REQUIREMENT in requirements
        or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
    ):
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced a race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )

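# Illustrative sketch (editor's addition): the dynamic type composition
# performed above, reduced to its essence. Each interface factory
# contributes one base class; type() then assembles the final repository
# class from all of them. The names below are hypothetical.
class _examplestorage(object):
    pass


class _examplefilestorage(object):
    pass


_examplerepoclass = type(
    'derivedrepo:example', (_examplestorage, _examplefilestorage), {}
)
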
def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is a vfs object pointing to the source repo if the current one
    is a shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from the shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret

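# Illustrative sketch (editor's addition): an extension monkeypatching
# loadhgrc() (e.g. via extensions.wrapfunction()) to pull config from an
# extra, hypothetical ``hgrc-extra`` file.
def _wrappedloadhgrc(orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    ret = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
    try:
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass
    return ret
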
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    if dirstate.SUPPORTS_DIRSTATE_V2:
        supported.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported

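# Illustrative sketch (editor's addition): on a zstd-capable build, the
# compression loop above contributes both spellings of the requirement,
# so the supported set ends up containing at least:
_example_zstd_requirements = {
    b'exp-compression-zstd',
    b'revlog-compression-zstd',
}
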
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns nothing on success.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )

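# Illustrative sketch (editor's addition): the validation above in action.
# A well-formed but unknown requirement raises RequirementError; an entry
# that does not start with an alphanumeric byte is treated as a corrupt
# .hg/requires file.
def _examplevalidation():
    supported = {b'store', b'revlogv1'}
    ensurerequirementsrecognized({b'store'}, supported)  # passes silently
    try:
        ensurerequirementsrecognized({b'exp-unknown'}, supported)
    except error.RequirementError:
        pass  # expected: feature unknown to this Mercurial
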
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

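# Illustrative sketch (editor's addition): the requirement-to-store mapping
# implemented by makestore() above, spelled out as a table.
_example_store_choice = [
    # (requirements present, store constructor chosen)
    ({b'store', b'fncache', b'dotencode'}, storemod.fncachestore),
    ({b'store', b'fncache'}, storemod.fncachestore),  # dotencode=False
    ({b'store'}, storemod.encodedstore),
    (set(), storemod.basicstore),
]
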
978 def resolvestorevfsoptions(ui, requirements, features):
981 def resolvestorevfsoptions(ui, requirements, features):
979 """Resolve the options to pass to the store vfs opener.
982 """Resolve the options to pass to the store vfs opener.
980
983
981 The returned dict is used to influence behavior of the storage layer.
984 The returned dict is used to influence behavior of the storage layer.
982 """
985 """
983 options = {}
986 options = {}
984
987
985 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
988 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
986 options[b'treemanifest'] = True
989 options[b'treemanifest'] = True
987
990
988 # experimental config: format.manifestcachesize
991 # experimental config: format.manifestcachesize
989 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
992 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
990 if manifestcachesize is not None:
993 if manifestcachesize is not None:
991 options[b'manifestcachesize'] = manifestcachesize
994 options[b'manifestcachesize'] = manifestcachesize
992
995
993 # In the absence of another requirement superseding a revlog-related
996 # In the absence of another requirement superseding a revlog-related
994 # requirement, we have to assume the repo is using revlog version 0.
997 # requirement, we have to assume the repo is using revlog version 0.
995 # This revlog format is super old and we don't bother trying to parse
998 # This revlog format is super old and we don't bother trying to parse
996 # opener options for it because those options wouldn't do anything
999 # opener options for it because those options wouldn't do anything
997 # meaningful on such old repos.
1000 # meaningful on such old repos.
998 if (
1001 if (
999 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1002 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1000 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1003 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1001 ):
1004 ):
1002 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1005 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1003 else: # explicitly mark repo as using revlogv0
1006 else: # explicitly mark repo as using revlogv0
1004 options[b'revlogv0'] = True
1007 options[b'revlogv0'] = True
1005
1008
1006 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1009 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1007 options[b'copies-storage'] = b'changeset-sidedata'
1010 options[b'copies-storage'] = b'changeset-sidedata'
1008 else:
1011 else:
1009 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1012 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1010 copiesextramode = (b'changeset-only', b'compatibility')
1013 copiesextramode = (b'changeset-only', b'compatibility')
1011 if writecopiesto in copiesextramode:
1014 if writecopiesto in copiesextramode:
1012 options[b'copies-storage'] = b'extra'
1015 options[b'copies-storage'] = b'extra'
1013
1016
1014 return options
1017 return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase
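
    # Roughly: `lazydelta` lets deltas received from an external source
    # (e.g. a bundle) be stored as-is instead of being recomputed, and
    # `lazydeltabase` additionally allows reusing the delta parent chosen
    # by that source.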

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen
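
    # Example: with sparse-revlog enabled and no explicit format.maxchainlen,
    # the cap is SPARSE_REVLOG_MAX_CHAIN_LENGTH; an explicit format.maxchainlen
    # wins because configint() above receives the computed value as its
    # default.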

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlogs seem to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]
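
    # For example, a b'revlog-compression-zstd' requirement splits into
    # [b'revlog', b'compression', b'zstd'], so compengine becomes b'zstd'.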

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
        if ui.configbool(b'devel', b'persistent-nodemap'):
            options[b'devel-force-nodemap'] = True
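
        # Recap of the slow-path handling above: b'allow' silently uses the
        # slower fallback, b'warn' prints the warning, and b'abort' refuses
        # to open the repository when no fast persistent-nodemap
        # implementation is available.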

    return options


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)
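
        # e.g. file(b'/foo/bar') and file(b'foo/bar') address the same
        # filelog; the leading b'/' is stripped above.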


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage
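
# For example (illustrative only): makefilestorage() with the narrow
# requirement present returns revlognarrowfilestorage, and otherwise
# revlogfilestorage; both paths register the revlog storage features.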


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
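
# Conceptually (a sketch, not the actual makelocalrepository() code), the
# final type is assembled roughly like:
#
#   bases = [fn()(requirements=requirements, features=features)
#            for _, fn in REPO_INTERFACES]
#   repotype = type('derivedrepo', tuple(bases), {})
#
# The lambda indirection above is what lets extensions wrap the module-level
# factory functions before they are resolved.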


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. The experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed, so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points to
           a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator).
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs
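
        # In effect: writes under journal.*, undo.*, strip-backup/ and cache/
        # must hold 'lock', while any other write under .hg/ (outside
        # _wlockfreeprefix) must hold 'wlock'.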

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example, calling `repo.filtered("served")` will return a repoview
        using the "served" view, regardless of the initial view used by
        `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
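
        # Example: repo.filtered(b'served') yields a view that excludes
        # hidden and secret changesets, whatever view `repo` itself used.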

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are
        # dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race-free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light": the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would
        # be refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still
        # happen without the lock (with a narrower window), but it would no
        # longer go undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)
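
        # Note on the decorator above: each (filename, location) pair names a
        # file to watch for changes, where b'plain' appears to mean a file
        # under .hg/ and b'' a file inside the store (hence b'00changelog.i'
        # is listed with b'').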

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid a race; see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(
            txnutil.mayhavepending(self.root),
            concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'),
        )

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
        )
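
        # This indirection exists so that extensions shipping a custom
        # dirstate can override _makedirstate() on a repository subclass
        # (typically installed from reposetup()) instead of patching the
        # `dirstate` filecache property itself.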

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                #   skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognize right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a
                    # recognizable exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
1892
1895
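    # Usage sketch (hypothetical caller, assuming a loaded `repo` object):
    # __getitem__ accepts several kinds of changeid, e.g.
    #
    #     repo[None]     # working directory context
    #     repo[0]        # integer revision number
    #     repo[b'.']     # working directory parent (quick-access path)
    #     repo[b'tip']   # repository tip
    #     repo[0:3]      # slice -> list of changectx, filtered revs skipped
    #     repo[binnode]  # 20-byte binary node; 40-byte hex also works
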
    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

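    # Example (hypothetical, assuming `repo` and a binary `node`); the
    # %-formatting is handled by revsetlang.formatspec:
    #
    #     repo.revs(b'heads(%ld)', [0, 1, 2])  # %ld escapes a list of ints
    #     repo.revs(b'ancestors(%n)', node)    # %n escapes a binary node
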
    def set(self, expr, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

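    # Example (hypothetical): iterate matching changesets as contexts.
    #
    #     for ctx in repo.set(b'draft()'):
    #         ui.write(ctx.hex() + b'\n')
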
    def anyrevs(self, specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

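    # Example (hypothetical): resolve a spec while overriding an alias
    # locally; `mine` here is an assumed alias name, not a built-in.
    #
    #     revs = repo.anyrevs(
    #         [b'mine()'], user=True, localalias={b'mine': b'user("alice")'}
    #     )
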
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tag-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like 'global' or 'local'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

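    # Example (hypothetical): map a node back to its tags, or walk all tags
    # in revision order.
    #
    #     names = repo.nodetags(node)        # sorted list of tag names
    #     for tag, node in repo.tagslist():  # ordered by revision
    #         ...
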
    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).
        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

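    # Example (hypothetical): probe for a branch without raising.
    #
    #     node = repo.branchtip(b'default', ignoremissing=True)
    #     if node is None:
    #         ...  # no such branch
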
    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

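    # Example (hypothetical): `known` answers node membership in bulk,
    # e.g. during discovery over the wire protocol.
    #
    #     flags = repo.known([node1, node2])  # e.g. [True, False]
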
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup application, but that fails
        # to cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes with
        # performance impacts. The current code runs more often than needed and
        # does not use caches as much as it could. The current focus is on the
        # behavior of the feature so we disable it by default. The flag will be
        # removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
2342 # experimental config: experimental.hook-track-tags
2345 # experimental config: experimental.hook-track-tags
2343 shouldtracktags = self.ui.configbool(
2346 shouldtracktags = self.ui.configbool(
2344 b'experimental', b'hook-track-tags'
2347 b'experimental', b'hook-track-tags'
2345 )
2348 )
2346 if desc != b'strip' and shouldtracktags:
2349 if desc != b'strip' and shouldtracktags:
2347 oldheads = self.changelog.headrevs()
2350 oldheads = self.changelog.headrevs()
2348
2351
2349 def tracktags(tr2):
2352 def tracktags(tr2):
2350 repo = reporef()
2353 repo = reporef()
2351 assert repo is not None # help pytype
2354 assert repo is not None # help pytype
2352 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2355 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2353 newheads = repo.changelog.headrevs()
2356 newheads = repo.changelog.headrevs()
2354 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2357 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2355 # notes: we compare lists here.
2358 # notes: we compare lists here.
2356 # As we do it only once buiding set would not be cheaper
2359 # As we do it only once buiding set would not be cheaper
2357 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2360 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2358 if changes:
2361 if changes:
2359 tr2.hookargs[b'tag_moved'] = b'1'
2362 tr2.hookargs[b'tag_moved'] = b'1'
2360 with repo.vfs(
2363 with repo.vfs(
2361 b'changes/tags.changes', b'w', atomictemp=True
2364 b'changes/tags.changes', b'w', atomictemp=True
2362 ) as changesfile:
2365 ) as changesfile:
2363 # note: we do not register the file to the transaction
2366 # note: we do not register the file to the transaction
2364 # because we needs it to still exist on the transaction
2367 # because we needs it to still exist on the transaction
2365 # is close (for txnclose hooks)
2368 # is close (for txnclose hooks)
2366 tagsmod.writediff(changesfile, changes)
2369 tagsmod.writediff(changesfile, changes)
2367
2370
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

            repo.invalidate(clearfilecache=True)

2457 rp,
2460 rp,
2458 self.svfs,
2461 self.svfs,
2459 vfsmap,
2462 vfsmap,
2460 b"journal",
2463 b"journal",
2461 b"undo",
2464 b"undo",
2462 aftertrans(renames),
2465 aftertrans(renames),
2463 self.store.createmode,
2466 self.store.createmode,
2464 validator=validate,
2467 validator=validate,
2465 releasefn=releasefn,
2468 releasefn=releasefn,
2466 checkambigfiles=_cachedfiles,
2469 checkambigfiles=_cachedfiles,
2467 name=desc,
2470 name=desc,
2468 )
2471 )
2469 tr.changes[b'origrepolen'] = len(self)
2472 tr.changes[b'origrepolen'] = len(self)
2470 tr.changes[b'obsmarkers'] = set()
2473 tr.changes[b'obsmarkers'] = set()
2471 tr.changes[b'phases'] = []
2474 tr.changes[b'phases'] = []
2472 tr.changes[b'bookmarks'] = {}
2475 tr.changes[b'bookmarks'] = {}
2473
2476
2474 tr.hookargs[b'txnid'] = txnid
2477 tr.hookargs[b'txnid'] = txnid
2475 tr.hookargs[b'txnname'] = desc
2478 tr.hookargs[b'txnname'] = desc
2476 tr.hookargs[b'changes'] = tr.changes
2479 tr.hookargs[b'changes'] = tr.changes
2477 # note: writing the fncache only during finalize mean that the file is
2480 # note: writing the fncache only during finalize mean that the file is
2478 # outdated when running hooks. As fncache is used for streaming clone,
2481 # outdated when running hooks. As fncache is used for streaming clone,
2479 # this is not expected to break anything that happen during the hooks.
2482 # this is not expected to break anything that happen during the hooks.
2480 tr.addfinalize(b'flush-fncache', self.store.write)
2483 tr.addfinalize(b'flush-fncache', self.store.write)
2481
2484
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            repo = reporef()
            assert repo is not None  # help pytype
            repo.hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

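    # Usage sketch (hypothetical caller): a transaction must be opened while
    # holding the store lock, and the returned transaction object can be used
    # as a context manager (assuming, as in recent Mercurial versions, that
    # transaction implements __enter__/__exit__):
    #
    #     with repo.lock():
    #         with repo.transaction(b'my-operation') as tr:
    #             ...  # mutate the store; closed on success, aborted on error
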
    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

2711 def _buildcacheupdater(self, newtransaction):
2714 def _buildcacheupdater(self, newtransaction):
2712 """called during transaction to build the callback updating cache
2715 """called during transaction to build the callback updating cache
2713
2716
2714 Lives on the repository to help extensions that might want to augment
2717 Lives on the repository to help extensions that might want to augment
2715 this logic. For this purpose, the created transaction is passed to the
2718 this logic. For this purpose, the created transaction is passed to the
2716 method.
2719 method.
2717 """
2720 """
2718 # we must avoid cyclic reference between repo and transaction.
2721 # we must avoid cyclic reference between repo and transaction.
2719 reporef = weakref.ref(self)
2722 reporef = weakref.ref(self)
2720
2723
2721 def updater(tr):
2724 def updater(tr):
2722 repo = reporef()
2725 repo = reporef()
2723 assert repo is not None # help pytype
2726 assert repo is not None # help pytype
2724 repo.updatecaches(tr)
2727 repo.updatecaches(tr)
2725
2728
2726 return updater
2729 return updater
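# Editorial sketch (not part of this change): the weakref above is what
# breaks the repo <-> transaction reference cycle. A hypothetical extension
# augmenting this logic via extensions.wrapfunction could follow the same
# pattern; `orig` is the wrapped method and all names below are made up.
#
#     def wrapped(orig, repo, newtransaction):
#         updater = orig(repo, newtransaction)
#         reporef = weakref.ref(repo)  # again, avoid a cycle
#
#         def extended(tr):
#             updater(tr)
#             r = reporef()
#             if r is not None:
#                 r.ui.debug(b'extension-specific cache update\n')
#
#         return extended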
2727
2730
2728 @unfilteredmethod
2731 @unfilteredmethod
2729 def updatecaches(self, tr=None, full=False):
2732 def updatecaches(self, tr=None, full=False):
2730 """warm appropriate caches
2733 """warm appropriate caches
2731
2734
2732 If this function is called after a transaction has closed, the
2735 If this function is called after a transaction has closed, the
2733 transaction will be available in the 'tr' argument. This can be used to
2736 transaction will be available in the 'tr' argument. This can be used to
2734 selectively update caches relevant to the changes in that transaction.
2737 selectively update caches relevant to the changes in that transaction.
2735
2738
2736 If 'full' is set, make sure all caches the function knows about have
2739 If 'full' is set, make sure all caches the function knows about have
2737 up-to-date data, even the ones usually loaded more lazily.
2740 up-to-date data, even the ones usually loaded more lazily.
2738
2741
2739 The `full` argument can take a special "post-clone" value. In this case
2742 The `full` argument can take a special "post-clone" value. In this case
2740 the cache warming is done after a clone, and some of the slower caches
2743 the cache warming is done after a clone, and some of the slower caches
2741 might be skipped, namely the `.fnodetags` one. This argument is 5.8
2744 might be skipped, namely the `.fnodetags` one. This argument is 5.8
2742 specific, as we plan for a cleaner way to deal with this in 5.9.
2745 specific, as we plan for a cleaner way to deal with this in 5.9.
2743 """
2746 """
2744 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2747 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2745 # During strip, many caches are invalid but
2748 # During strip, many caches are invalid but
2746 # later call to `destroyed` will refresh them.
2749 # later call to `destroyed` will refresh them.
2747 return
2750 return
2748
2751
2749 if tr is None or tr.changes[b'origrepolen'] < len(self):
2752 if tr is None or tr.changes[b'origrepolen'] < len(self):
2750 # accessing the 'served' branchmap should refresh all the others,
2753 # accessing the 'served' branchmap should refresh all the others,
2751 self.ui.debug(b'updating the branch cache\n')
2754 self.ui.debug(b'updating the branch cache\n')
2752 self.filtered(b'served').branchmap()
2755 self.filtered(b'served').branchmap()
2753 self.filtered(b'served.hidden').branchmap()
2756 self.filtered(b'served.hidden').branchmap()
2754
2757
2755 if full:
2758 if full:
2756 unfi = self.unfiltered()
2759 unfi = self.unfiltered()
2757
2760
2758 self.changelog.update_caches(transaction=tr)
2761 self.changelog.update_caches(transaction=tr)
2759 self.manifestlog.update_caches(transaction=tr)
2762 self.manifestlog.update_caches(transaction=tr)
2760
2763
2761 rbc = unfi.revbranchcache()
2764 rbc = unfi.revbranchcache()
2762 for r in unfi.changelog:
2765 for r in unfi.changelog:
2763 rbc.branchinfo(r)
2766 rbc.branchinfo(r)
2764 rbc.write()
2767 rbc.write()
2765
2768
2766 # ensure the working copy parents are in the manifestfulltextcache
2769 # ensure the working copy parents are in the manifestfulltextcache
2767 for ctx in self[b'.'].parents():
2770 for ctx in self[b'.'].parents():
2768 ctx.manifest() # accessing the manifest is enough
2771 ctx.manifest() # accessing the manifest is enough
2769
2772
2770 if full != b"post-clone":
2773 if full != b"post-clone":
2771 # accessing fnode cache warms the cache
2774 # accessing fnode cache warms the cache
2772 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2775 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2773 # accessing tags warms the cache
2776 # accessing tags warms the cache
2774 self.tags()
2777 self.tags()
2775 self.filtered(b'served').tags()
2778 self.filtered(b'served').tags()
2776
2779
2777 # The `full` arg is documented as updating even the lazily-loaded
2780 # The `full` arg is documented as updating even the lazily-loaded
2778 # caches immediately, so we're forcing a write to cause these caches
2781 # caches immediately, so we're forcing a write to cause these caches
2779 # to be warmed up even if they haven't explicitly been requested
2782 # to be warmed up even if they haven't explicitly been requested
2780 # yet (if they've never been used by hg, they won't ever have been
2783 # yet (if they've never been used by hg, they won't ever have been
2781 # written, even if they're a subset of another kind of cache that
2784 # written, even if they're a subset of another kind of cache that
2782 # *has* been used).
2785 # *has* been used).
2783 for filt in repoview.filtertable.keys():
2786 for filt in repoview.filtertable.keys():
2784 filtered = self.filtered(filt)
2787 filtered = self.filtered(filt)
2785 filtered.branchmap().write(filtered)
2788 filtered.branchmap().write(filtered)
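# Editorial sketch (not part of this change): the three documented calling
# conventions, side by side; `repo` is a hypothetical repository object.
#
#     repo.updatecaches(tr)                  # after a transaction closes
#     repo.updatecaches(full=True)           # eagerly warm everything
#     repo.updatecaches(full=b"post-clone")  # clone path, skip fnode cache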
2786
2789
2787 def invalidatecaches(self):
2790 def invalidatecaches(self):
2788
2791
2789 if '_tagscache' in vars(self):
2792 if '_tagscache' in vars(self):
2790 # can't use delattr on proxy
2793 # can't use delattr on proxy
2791 del self.__dict__['_tagscache']
2794 del self.__dict__['_tagscache']
2792
2795
2793 self._branchcaches.clear()
2796 self._branchcaches.clear()
2794 self.invalidatevolatilesets()
2797 self.invalidatevolatilesets()
2795 self._sparsesignaturecache.clear()
2798 self._sparsesignaturecache.clear()
2796
2799
2797 def invalidatevolatilesets(self):
2800 def invalidatevolatilesets(self):
2798 self.filteredrevcache.clear()
2801 self.filteredrevcache.clear()
2799 obsolete.clearobscaches(self)
2802 obsolete.clearobscaches(self)
2800 self._quick_access_changeid_invalidate()
2803 self._quick_access_changeid_invalidate()
2801
2804
2802 def invalidatedirstate(self):
2805 def invalidatedirstate(self):
2803 """Invalidates the dirstate, causing the next call to dirstate
2806 """Invalidates the dirstate, causing the next call to dirstate
2804 to check whether it was modified since the last time it was read,
2807 to check whether it was modified since the last time it was read,
2805 rereading it if it has been.
2808 rereading it if it has been.
2806
2809
2807 This is different from dirstate.invalidate() in that it doesn't always
2810 This is different from dirstate.invalidate() in that it doesn't always
2808 reread the dirstate. Use dirstate.invalidate() if you want to
2811 reread the dirstate. Use dirstate.invalidate() if you want to
2809 explicitly read the dirstate again (i.e. restoring it to a previous
2812 explicitly read the dirstate again (i.e. restoring it to a previous
2810 known good state)."""
2813 known good state)."""
2811 if hasunfilteredcache(self, 'dirstate'):
2814 if hasunfilteredcache(self, 'dirstate'):
2812 for k in self.dirstate._filecache:
2815 for k in self.dirstate._filecache:
2813 try:
2816 try:
2814 delattr(self.dirstate, k)
2817 delattr(self.dirstate, k)
2815 except AttributeError:
2818 except AttributeError:
2816 pass
2819 pass
2817 delattr(self.unfiltered(), 'dirstate')
2820 delattr(self.unfiltered(), 'dirstate')
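# Editorial sketch (not part of this change): the distinction documented
# above, in call form; `repo` is a hypothetical repository object.
#
#     repo.invalidatedirstate()   # recheck lazily on next dirstate access
#     repo.dirstate.invalidate()  # always drop in-memory state and reread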
2818
2821
2819 def invalidate(self, clearfilecache=False):
2822 def invalidate(self, clearfilecache=False):
2820 """Invalidates both store and non-store parts other than dirstate
2823 """Invalidates both store and non-store parts other than dirstate
2821
2824
2822 If a transaction is running, invalidation of store is omitted,
2825 If a transaction is running, invalidation of store is omitted,
2823 because discarding in-memory changes might cause inconsistency
2826 because discarding in-memory changes might cause inconsistency
2824 (e.g. an incomplete fncache causes unintentional failure, but a
2827 (e.g. an incomplete fncache causes unintentional failure, but a
2825 redundant one doesn't).
2828 redundant one doesn't).
2826 """
2829 """
2827 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2830 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2828 for k in list(self._filecache.keys()):
2831 for k in list(self._filecache.keys()):
2829 # dirstate is invalidated separately in invalidatedirstate()
2832 # dirstate is invalidated separately in invalidatedirstate()
2830 if k == b'dirstate':
2833 if k == b'dirstate':
2831 continue
2834 continue
2832 if (
2835 if (
2833 k == b'changelog'
2836 k == b'changelog'
2834 and self.currenttransaction()
2837 and self.currenttransaction()
2835 and self.changelog._delayed
2838 and self.changelog._delayed
2836 ):
2839 ):
2837 # The changelog object may store unwritten revisions. We don't
2840 # The changelog object may store unwritten revisions. We don't
2838 # want to lose them.
2841 # want to lose them.
2839 # TODO: Solve the problem instead of working around it.
2842 # TODO: Solve the problem instead of working around it.
2840 continue
2843 continue
2841
2844
2842 if clearfilecache:
2845 if clearfilecache:
2843 del self._filecache[k]
2846 del self._filecache[k]
2844 try:
2847 try:
2845 delattr(unfiltered, k)
2848 delattr(unfiltered, k)
2846 except AttributeError:
2849 except AttributeError:
2847 pass
2850 pass
2848 self.invalidatecaches()
2851 self.invalidatecaches()
2849 if not self.currenttransaction():
2852 if not self.currenttransaction():
2850 # TODO: Changing contents of store outside transaction
2853 # TODO: Changing contents of store outside transaction
2851 # causes inconsistency. We should make in-memory store
2854 # causes inconsistency. We should make in-memory store
2852 # changes detectable, and abort if changed.
2855 # changes detectable, and abort if changed.
2853 self.store.invalidatecaches()
2856 self.store.invalidatecaches()
2854
2857
2855 def invalidateall(self):
2858 def invalidateall(self):
2856 """Fully invalidates both store and non-store parts, causing the
2859 """Fully invalidates both store and non-store parts, causing the
2857 subsequent operation to reread any outside changes."""
2860 subsequent operation to reread any outside changes."""
2858 # extensions should hook this to invalidate their caches
2861 # extensions should hook this to invalidate their caches
2859 self.invalidate()
2862 self.invalidate()
2860 self.invalidatedirstate()
2863 self.invalidatedirstate()
2861
2864
2862 @unfilteredmethod
2865 @unfilteredmethod
2863 def _refreshfilecachestats(self, tr):
2866 def _refreshfilecachestats(self, tr):
2864 """Reload stats of cached files so that they are flagged as valid"""
2867 """Reload stats of cached files so that they are flagged as valid"""
2865 for k, ce in self._filecache.items():
2868 for k, ce in self._filecache.items():
2866 k = pycompat.sysstr(k)
2869 k = pycompat.sysstr(k)
2867 if k == 'dirstate' or k not in self.__dict__:
2870 if k == 'dirstate' or k not in self.__dict__:
2868 continue
2871 continue
2869 ce.refresh()
2872 ce.refresh()
2870
2873
2871 def _lock(
2874 def _lock(
2872 self,
2875 self,
2873 vfs,
2876 vfs,
2874 lockname,
2877 lockname,
2875 wait,
2878 wait,
2876 releasefn,
2879 releasefn,
2877 acquirefn,
2880 acquirefn,
2878 desc,
2881 desc,
2879 ):
2882 ):
2880 timeout = 0
2883 timeout = 0
2881 warntimeout = 0
2884 warntimeout = 0
2882 if wait:
2885 if wait:
2883 timeout = self.ui.configint(b"ui", b"timeout")
2886 timeout = self.ui.configint(b"ui", b"timeout")
2884 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2887 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2885 # internal config: ui.signal-safe-lock
2888 # internal config: ui.signal-safe-lock
2886 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2889 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2887
2890
2888 l = lockmod.trylock(
2891 l = lockmod.trylock(
2889 self.ui,
2892 self.ui,
2890 vfs,
2893 vfs,
2891 lockname,
2894 lockname,
2892 timeout,
2895 timeout,
2893 warntimeout,
2896 warntimeout,
2894 releasefn=releasefn,
2897 releasefn=releasefn,
2895 acquirefn=acquirefn,
2898 acquirefn=acquirefn,
2896 desc=desc,
2899 desc=desc,
2897 signalsafe=signalsafe,
2900 signalsafe=signalsafe,
2898 )
2901 )
2899 return l
2902 return l
2900
2903
2901 def _afterlock(self, callback):
2904 def _afterlock(self, callback):
2902 """add a callback to be run when the repository is fully unlocked
2905 """add a callback to be run when the repository is fully unlocked
2903
2906
2904 The callback will be executed when the outermost lock is released
2907 The callback will be executed when the outermost lock is released
2905 (with wlock being higher level than 'lock')."""
2908 (with wlock being higher level than 'lock')."""
2906 for ref in (self._wlockref, self._lockref):
2909 for ref in (self._wlockref, self._lockref):
2907 l = ref and ref()
2910 l = ref and ref()
2908 if l and l.held:
2911 if l and l.held:
2909 l.postrelease.append(callback)
2912 l.postrelease.append(callback)
2910 break
2913 break
2911 else: # no lock has been found.
2914 else: # no lock has been found.
2912 callback(True)
2915 callback(True)
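# Editorial sketch (not part of this change): deferring work until all
# locks are released; `repo` and `announce` are hypothetical names.
#
#     def announce(success):
#         if success:
#             repo.ui.note(b'all locks released\n')
#
#     repo._afterlock(announce)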
2913
2916
2914 def lock(self, wait=True):
2917 def lock(self, wait=True):
2915 """Lock the repository store (.hg/store) and return a weak reference
2918 """Lock the repository store (.hg/store) and return a weak reference
2916 to the lock. Use this before modifying the store (e.g. committing or
2919 to the lock. Use this before modifying the store (e.g. committing or
2917 stripping). If you are opening a transaction, get a lock as well.
2920 stripping). If you are opening a transaction, get a lock as well.
2918
2921
2919 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2922 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2920 'wlock' first to avoid a deadlock hazard."""
2923 'wlock' first to avoid a deadlock hazard."""
2921 l = self._currentlock(self._lockref)
2924 l = self._currentlock(self._lockref)
2922 if l is not None:
2925 if l is not None:
2923 l.lock()
2926 l.lock()
2924 return l
2927 return l
2925
2928
2926 l = self._lock(
2929 l = self._lock(
2927 vfs=self.svfs,
2930 vfs=self.svfs,
2928 lockname=b"lock",
2931 lockname=b"lock",
2929 wait=wait,
2932 wait=wait,
2930 releasefn=None,
2933 releasefn=None,
2931 acquirefn=self.invalidate,
2934 acquirefn=self.invalidate,
2932 desc=_(b'repository %s') % self.origroot,
2935 desc=_(b'repository %s') % self.origroot,
2933 )
2936 )
2934 self._lockref = weakref.ref(l)
2937 self._lockref = weakref.ref(l)
2935 return l
2938 return l
2936
2939
2937 def wlock(self, wait=True):
2940 def wlock(self, wait=True):
2938 """Lock the non-store parts of the repository (everything under
2941 """Lock the non-store parts of the repository (everything under
2939 .hg except .hg/store) and return a weak reference to the lock.
2942 .hg except .hg/store) and return a weak reference to the lock.
2940
2943
2941 Use this before modifying files in .hg.
2944 Use this before modifying files in .hg.
2942
2945
2943 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2946 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2944 'wlock' first to avoid a deadlock hazard."""
2947 'wlock' first to avoid a deadlock hazard."""
2945 l = self._wlockref() if self._wlockref else None
2948 l = self._wlockref() if self._wlockref else None
2946 if l is not None and l.held:
2949 if l is not None and l.held:
2947 l.lock()
2950 l.lock()
2948 return l
2951 return l
2949
2952
2950 # We do not need to check for non-waiting lock acquisition. Such
2953 # We do not need to check for non-waiting lock acquisition. Such
2951 # acquisitions would not cause a deadlock, as they would just fail.
2954 # acquisitions would not cause a deadlock, as they would just fail.
2952 if wait and (
2955 if wait and (
2953 self.ui.configbool(b'devel', b'all-warnings')
2956 self.ui.configbool(b'devel', b'all-warnings')
2954 or self.ui.configbool(b'devel', b'check-locks')
2957 or self.ui.configbool(b'devel', b'check-locks')
2955 ):
2958 ):
2956 if self._currentlock(self._lockref) is not None:
2959 if self._currentlock(self._lockref) is not None:
2957 self.ui.develwarn(b'"wlock" acquired after "lock"')
2960 self.ui.develwarn(b'"wlock" acquired after "lock"')
2958
2961
2959 def unlock():
2962 def unlock():
2960 if self.dirstate.pendingparentchange():
2963 if self.dirstate.pendingparentchange():
2961 self.dirstate.invalidate()
2964 self.dirstate.invalidate()
2962 else:
2965 else:
2963 self.dirstate.write(None)
2966 self.dirstate.write(None)
2964
2967
2965 self._filecache[b'dirstate'].refresh()
2968 self._filecache[b'dirstate'].refresh()
2966
2969
2967 l = self._lock(
2970 l = self._lock(
2968 self.vfs,
2971 self.vfs,
2969 b"wlock",
2972 b"wlock",
2970 wait,
2973 wait,
2971 unlock,
2974 unlock,
2972 self.invalidatedirstate,
2975 self.invalidatedirstate,
2973 _(b'working directory of %s') % self.origroot,
2976 _(b'working directory of %s') % self.origroot,
2974 )
2977 )
2975 self._wlockref = weakref.ref(l)
2978 self._wlockref = weakref.ref(l)
2976 return l
2979 return l
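# Editorial sketch (not part of this change): the documented acquisition
# order, 'wlock' before 'lock', as commit() below also does; `repo` is a
# hypothetical repository object.
#
#     with repo.wlock(), repo.lock():
#         pass  # safe to mutate both store and working copy here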
2977
2980
2978 def _currentlock(self, lockref):
2981 def _currentlock(self, lockref):
2979 """Returns the lock if it's held, or None if it's not."""
2982 """Returns the lock if it's held, or None if it's not."""
2980 if lockref is None:
2983 if lockref is None:
2981 return None
2984 return None
2982 l = lockref()
2985 l = lockref()
2983 if l is None or not l.held:
2986 if l is None or not l.held:
2984 return None
2987 return None
2985 return l
2988 return l
2986
2989
2987 def currentwlock(self):
2990 def currentwlock(self):
2988 """Returns the wlock if it's held, or None if it's not."""
2991 """Returns the wlock if it's held, or None if it's not."""
2989 return self._currentlock(self._wlockref)
2992 return self._currentlock(self._wlockref)
2990
2993
2991 def checkcommitpatterns(self, wctx, match, status, fail):
2994 def checkcommitpatterns(self, wctx, match, status, fail):
2992 """check for commit arguments that aren't committable"""
2995 """check for commit arguments that aren't committable"""
2993 if match.isexact() or match.prefix():
2996 if match.isexact() or match.prefix():
2994 matched = set(status.modified + status.added + status.removed)
2997 matched = set(status.modified + status.added + status.removed)
2995
2998
2996 for f in match.files():
2999 for f in match.files():
2997 f = self.dirstate.normalize(f)
3000 f = self.dirstate.normalize(f)
2998 if f == b'.' or f in matched or f in wctx.substate:
3001 if f == b'.' or f in matched or f in wctx.substate:
2999 continue
3002 continue
3000 if f in status.deleted:
3003 if f in status.deleted:
3001 fail(f, _(b'file not found!'))
3004 fail(f, _(b'file not found!'))
3002 # Is it a directory that exists or used to exist?
3005 # Is it a directory that exists or used to exist?
3003 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3006 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3004 d = f + b'/'
3007 d = f + b'/'
3005 for mf in matched:
3008 for mf in matched:
3006 if mf.startswith(d):
3009 if mf.startswith(d):
3007 break
3010 break
3008 else:
3011 else:
3009 fail(f, _(b"no match under directory!"))
3012 fail(f, _(b"no match under directory!"))
3010 elif f not in self.dirstate:
3013 elif f not in self.dirstate:
3011 fail(f, _(b"file not tracked!"))
3014 fail(f, _(b"file not tracked!"))
3012
3015
3013 @unfilteredmethod
3016 @unfilteredmethod
3014 def commit(
3017 def commit(
3015 self,
3018 self,
3016 text=b"",
3019 text=b"",
3017 user=None,
3020 user=None,
3018 date=None,
3021 date=None,
3019 match=None,
3022 match=None,
3020 force=False,
3023 force=False,
3021 editor=None,
3024 editor=None,
3022 extra=None,
3025 extra=None,
3023 ):
3026 ):
3024 """Add a new revision to current repository.
3027 """Add a new revision to current repository.
3025
3028
3026 Revision information is gathered from the working directory;
3029 Revision information is gathered from the working directory;
3027 match can be used to filter the committed files. If editor is
3030 match can be used to filter the committed files. If editor is
3028 supplied, it is called to get a commit message.
3031 supplied, it is called to get a commit message.
3029 """
3032 """
3030 if extra is None:
3033 if extra is None:
3031 extra = {}
3034 extra = {}
3032
3035
3033 def fail(f, msg):
3036 def fail(f, msg):
3034 raise error.InputError(b'%s: %s' % (f, msg))
3037 raise error.InputError(b'%s: %s' % (f, msg))
3035
3038
3036 if not match:
3039 if not match:
3037 match = matchmod.always()
3040 match = matchmod.always()
3038
3041
3039 if not force:
3042 if not force:
3040 match.bad = fail
3043 match.bad = fail
3041
3044
3042 # lock() for recent changelog (see issue4368)
3045 # lock() for recent changelog (see issue4368)
3043 with self.wlock(), self.lock():
3046 with self.wlock(), self.lock():
3044 wctx = self[None]
3047 wctx = self[None]
3045 merge = len(wctx.parents()) > 1
3048 merge = len(wctx.parents()) > 1
3046
3049
3047 if not force and merge and not match.always():
3050 if not force and merge and not match.always():
3048 raise error.Abort(
3051 raise error.Abort(
3049 _(
3052 _(
3050 b'cannot partially commit a merge '
3053 b'cannot partially commit a merge '
3051 b'(do not specify files or patterns)'
3054 b'(do not specify files or patterns)'
3052 )
3055 )
3053 )
3056 )
3054
3057
3055 status = self.status(match=match, clean=force)
3058 status = self.status(match=match, clean=force)
3056 if force:
3059 if force:
3057 status.modified.extend(
3060 status.modified.extend(
3058 status.clean
3061 status.clean
3059 ) # mq may commit clean files
3062 ) # mq may commit clean files
3060
3063
3061 # check subrepos
3064 # check subrepos
3062 subs, commitsubs, newstate = subrepoutil.precommit(
3065 subs, commitsubs, newstate = subrepoutil.precommit(
3063 self.ui, wctx, status, match, force=force
3066 self.ui, wctx, status, match, force=force
3064 )
3067 )
3065
3068
3066 # make sure all explicit patterns are matched
3069 # make sure all explicit patterns are matched
3067 if not force:
3070 if not force:
3068 self.checkcommitpatterns(wctx, match, status, fail)
3071 self.checkcommitpatterns(wctx, match, status, fail)
3069
3072
3070 cctx = context.workingcommitctx(
3073 cctx = context.workingcommitctx(
3071 self, status, text, user, date, extra
3074 self, status, text, user, date, extra
3072 )
3075 )
3073
3076
3074 ms = mergestatemod.mergestate.read(self)
3077 ms = mergestatemod.mergestate.read(self)
3075 mergeutil.checkunresolved(ms)
3078 mergeutil.checkunresolved(ms)
3076
3079
3077 # internal config: ui.allowemptycommit
3080 # internal config: ui.allowemptycommit
3078 if cctx.isempty() and not self.ui.configbool(
3081 if cctx.isempty() and not self.ui.configbool(
3079 b'ui', b'allowemptycommit'
3082 b'ui', b'allowemptycommit'
3080 ):
3083 ):
3081 self.ui.debug(b'nothing to commit, clearing merge state\n')
3084 self.ui.debug(b'nothing to commit, clearing merge state\n')
3082 ms.reset()
3085 ms.reset()
3083 return None
3086 return None
3084
3087
3085 if merge and cctx.deleted():
3088 if merge and cctx.deleted():
3086 raise error.Abort(_(b"cannot commit merge with missing files"))
3089 raise error.Abort(_(b"cannot commit merge with missing files"))
3087
3090
3088 if editor:
3091 if editor:
3089 cctx._text = editor(self, cctx, subs)
3092 cctx._text = editor(self, cctx, subs)
3090 edited = text != cctx._text
3093 edited = text != cctx._text
3091
3094
3092 # Save commit message in case this transaction gets rolled back
3095 # Save commit message in case this transaction gets rolled back
3093 # (e.g. by a pretxncommit hook). Leave the content alone on
3096 # (e.g. by a pretxncommit hook). Leave the content alone on
3094 # the assumption that the user will use the same editor again.
3097 # the assumption that the user will use the same editor again.
3095 msgfn = self.savecommitmessage(cctx._text)
3098 msgfn = self.savecommitmessage(cctx._text)
3096
3099
3097 # commit subs and write new state
3100 # commit subs and write new state
3098 if subs:
3101 if subs:
3099 uipathfn = scmutil.getuipathfn(self)
3102 uipathfn = scmutil.getuipathfn(self)
3100 for s in sorted(commitsubs):
3103 for s in sorted(commitsubs):
3101 sub = wctx.sub(s)
3104 sub = wctx.sub(s)
3102 self.ui.status(
3105 self.ui.status(
3103 _(b'committing subrepository %s\n')
3106 _(b'committing subrepository %s\n')
3104 % uipathfn(subrepoutil.subrelpath(sub))
3107 % uipathfn(subrepoutil.subrelpath(sub))
3105 )
3108 )
3106 sr = sub.commit(cctx._text, user, date)
3109 sr = sub.commit(cctx._text, user, date)
3107 newstate[s] = (newstate[s][0], sr)
3110 newstate[s] = (newstate[s][0], sr)
3108 subrepoutil.writestate(self, newstate)
3111 subrepoutil.writestate(self, newstate)
3109
3112
3110 p1, p2 = self.dirstate.parents()
3113 p1, p2 = self.dirstate.parents()
3111 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3114 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3112 try:
3115 try:
3113 self.hook(
3116 self.hook(
3114 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3117 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3115 )
3118 )
3116 with self.transaction(b'commit'):
3119 with self.transaction(b'commit'):
3117 ret = self.commitctx(cctx, True)
3120 ret = self.commitctx(cctx, True)
3118 # update bookmarks, dirstate and mergestate
3121 # update bookmarks, dirstate and mergestate
3119 bookmarks.update(self, [p1, p2], ret)
3122 bookmarks.update(self, [p1, p2], ret)
3120 cctx.markcommitted(ret)
3123 cctx.markcommitted(ret)
3121 ms.reset()
3124 ms.reset()
3122 except: # re-raises
3125 except: # re-raises
3123 if edited:
3126 if edited:
3124 self.ui.write(
3127 self.ui.write(
3125 _(b'note: commit message saved in %s\n') % msgfn
3128 _(b'note: commit message saved in %s\n') % msgfn
3126 )
3129 )
3127 self.ui.write(
3130 self.ui.write(
3128 _(
3131 _(
3129 b"note: use 'hg commit --logfile "
3132 b"note: use 'hg commit --logfile "
3130 b".hg/last-message.txt --edit' to reuse it\n"
3133 b".hg/last-message.txt --edit' to reuse it\n"
3131 )
3134 )
3132 )
3135 )
3133 raise
3136 raise
3134
3137
3135 def commithook(unused_success):
3138 def commithook(unused_success):
3136 # hack for commands that use a temporary commit (e.g. histedit):
3139 # hack for commands that use a temporary commit (e.g. histedit):
3137 # the temporary commit may be stripped before the hook runs
3140 # the temporary commit may be stripped before the hook runs
3138 if self.changelog.hasnode(ret):
3141 if self.changelog.hasnode(ret):
3139 self.hook(
3142 self.hook(
3140 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3143 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3141 )
3144 )
3142
3145
3143 self._afterlock(commithook)
3146 self._afterlock(commithook)
3144 return ret
3147 return ret
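# Editorial sketch (not part of this change): a minimal programmatic
# commit; `repo` is a hypothetical repository object and the working
# directory is assumed to have pending changes.
#
#     node = repo.commit(text=b'fix typo', user=b'alice <a@example.com>')
#     if node is None:
#         repo.ui.status(b'nothing to commit\n')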
3145
3148
3146 @unfilteredmethod
3149 @unfilteredmethod
3147 def commitctx(self, ctx, error=False, origctx=None):
3150 def commitctx(self, ctx, error=False, origctx=None):
3148 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3151 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3149
3152
3150 @unfilteredmethod
3153 @unfilteredmethod
3151 def destroying(self):
3154 def destroying(self):
3152 """Inform the repository that nodes are about to be destroyed.
3155 """Inform the repository that nodes are about to be destroyed.
3153 Intended for use by strip and rollback, so there's a common
3156 Intended for use by strip and rollback, so there's a common
3154 place for anything that has to be done before destroying history.
3157 place for anything that has to be done before destroying history.
3155
3158
3156 This is mostly useful for saving state that is in memory and waiting
3159 This is mostly useful for saving state that is in memory and waiting
3157 to be flushed when the current lock is released. Because a call to
3160 to be flushed when the current lock is released. Because a call to
3158 destroyed is imminent, the repo will be invalidated, causing those
3161 destroyed is imminent, the repo will be invalidated, causing those
3159 changes to stay in memory (waiting for the next unlock), or vanish
3162 changes to stay in memory (waiting for the next unlock), or vanish
3160 completely.
3163 completely.
3161 """
3164 """
3162 # When using the same lock to commit and strip, the phasecache is left
3165 # When using the same lock to commit and strip, the phasecache is left
3163 # dirty after committing. Then when we strip, the repo is invalidated,
3166 # dirty after committing. Then when we strip, the repo is invalidated,
3164 # causing those changes to disappear.
3167 # causing those changes to disappear.
3165 if '_phasecache' in vars(self):
3168 if '_phasecache' in vars(self):
3166 self._phasecache.write()
3169 self._phasecache.write()
3167
3170
3168 @unfilteredmethod
3171 @unfilteredmethod
3169 def destroyed(self):
3172 def destroyed(self):
3170 """Inform the repository that nodes have been destroyed.
3173 """Inform the repository that nodes have been destroyed.
3171 Intended for use by strip and rollback, so there's a common
3174 Intended for use by strip and rollback, so there's a common
3172 place for anything that has to be done after destroying history.
3175 place for anything that has to be done after destroying history.
3173 """
3176 """
3174 # When one tries to:
3177 # When one tries to:
3175 # 1) destroy nodes thus calling this method (e.g. strip)
3178 # 1) destroy nodes thus calling this method (e.g. strip)
3176 # 2) use phasecache somewhere (e.g. commit)
3179 # 2) use phasecache somewhere (e.g. commit)
3177 #
3180 #
3178 # then 2) will fail because the phasecache contains nodes that were
3181 # then 2) will fail because the phasecache contains nodes that were
3179 # removed. We can either remove phasecache from the filecache,
3182 # removed. We can either remove phasecache from the filecache,
3180 # causing it to reload next time it is accessed, or simply filter
3183 # causing it to reload next time it is accessed, or simply filter
3181 # the removed nodes now and write the updated cache.
3184 # the removed nodes now and write the updated cache.
3182 self._phasecache.filterunknown(self)
3185 self._phasecache.filterunknown(self)
3183 self._phasecache.write()
3186 self._phasecache.write()
3184
3187
3185 # refresh all repository caches
3188 # refresh all repository caches
3186 self.updatecaches()
3189 self.updatecaches()
3187
3190
3188 # Ensure the persistent tag cache is updated. Doing it now
3191 # Ensure the persistent tag cache is updated. Doing it now
3189 # means that the tag cache only has to worry about destroyed
3192 # means that the tag cache only has to worry about destroyed
3190 # heads immediately after a strip/rollback. That in turn
3193 # heads immediately after a strip/rollback. That in turn
3191 # guarantees that "cachetip == currenttip" (comparing both rev
3194 # guarantees that "cachetip == currenttip" (comparing both rev
3192 # and node) always means no nodes have been added or destroyed.
3195 # and node) always means no nodes have been added or destroyed.
3193
3196
3194 # XXX this is suboptimal when qrefresh'ing: we strip the current
3197 # XXX this is suboptimal when qrefresh'ing: we strip the current
3195 # head, refresh the tag cache, then immediately add a new head.
3198 # head, refresh the tag cache, then immediately add a new head.
3196 # But I think doing it this way is necessary for the "instant
3199 # But I think doing it this way is necessary for the "instant
3197 # tag cache retrieval" case to work.
3200 # tag cache retrieval" case to work.
3198 self.invalidate()
3201 self.invalidate()
3199
3202
3200 def status(
3203 def status(
3201 self,
3204 self,
3202 node1=b'.',
3205 node1=b'.',
3203 node2=None,
3206 node2=None,
3204 match=None,
3207 match=None,
3205 ignored=False,
3208 ignored=False,
3206 clean=False,
3209 clean=False,
3207 unknown=False,
3210 unknown=False,
3208 listsubrepos=False,
3211 listsubrepos=False,
3209 ):
3212 ):
3210 '''a convenience method that calls node1.status(node2)'''
3213 '''a convenience method that calls node1.status(node2)'''
3211 return self[node1].status(
3214 return self[node1].status(
3212 node2, match, ignored, clean, unknown, listsubrepos
3215 node2, match, ignored, clean, unknown, listsubrepos
3213 )
3216 )
3214
3217
3215 def addpostdsstatus(self, ps):
3218 def addpostdsstatus(self, ps):
3216 """Add a callback to run within the wlock, at the point at which status
3219 """Add a callback to run within the wlock, at the point at which status
3217 fixups happen.
3220 fixups happen.
3218
3221
3219 On status completion, callback(wctx, status) will be called with the
3222 On status completion, callback(wctx, status) will be called with the
3220 wlock held, unless the dirstate has changed from underneath or the wlock
3223 wlock held, unless the dirstate has changed from underneath or the wlock
3221 couldn't be grabbed.
3224 couldn't be grabbed.
3222
3225
3223 Callbacks should not capture and use a cached copy of the dirstate --
3226 Callbacks should not capture and use a cached copy of the dirstate --
3224 it might change in the meantime. Instead, they should access the
3227 it might change in the meantime. Instead, they should access the
3225 dirstate via wctx.repo().dirstate.
3228 dirstate via wctx.repo().dirstate.
3226
3229
3227 This list is emptied out after each status run -- extensions should
3230 This list is emptied out after each status run -- extensions should
3228 make sure to add to this list each time dirstate.status is called.
3231 make sure to add to this list each time dirstate.status is called.
3229 Extensions should also make sure they don't call this for statuses
3232 Extensions should also make sure they don't call this for statuses
3230 that don't involve the dirstate.
3233 that don't involve the dirstate.
3231 """
3234 """
3232
3235
3233 # The list is located here for uniqueness reasons -- it is actually
3236 # The list is located here for uniqueness reasons -- it is actually
3234 # managed by the workingctx, but that isn't unique per-repo.
3237 # managed by the workingctx, but that isn't unique per-repo.
3235 self._postdsstatus.append(ps)
3238 self._postdsstatus.append(ps)
3236
3239
3237 def postdsstatus(self):
3240 def postdsstatus(self):
3238 """Used by workingctx to get the list of post-dirstate-status hooks."""
3241 """Used by workingctx to get the list of post-dirstate-status hooks."""
3239 return self._postdsstatus
3242 return self._postdsstatus
3240
3243
3241 def clearpostdsstatus(self):
3244 def clearpostdsstatus(self):
3242 """Used by workingctx to clear post-dirstate-status hooks."""
3245 """Used by workingctx to clear post-dirstate-status hooks."""
3243 del self._postdsstatus[:]
3246 del self._postdsstatus[:]
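# Editorial sketch (not part of this change): registering a status fixup
# callback through the API above; all names are hypothetical. Note the
# callback re-derives the dirstate from wctx, as the docstring requires.
#
#     def fixup(wctx, status):
#         wctx.repo().ui.debug(b'%d file(s) modified\n' % len(status.modified))
#
#     repo.addpostdsstatus(fixup)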
3244
3247
3245 def heads(self, start=None):
3248 def heads(self, start=None):
3246 if start is None:
3249 if start is None:
3247 cl = self.changelog
3250 cl = self.changelog
3248 headrevs = reversed(cl.headrevs())
3251 headrevs = reversed(cl.headrevs())
3249 return [cl.node(rev) for rev in headrevs]
3252 return [cl.node(rev) for rev in headrevs]
3250
3253
3251 heads = self.changelog.heads(start)
3254 heads = self.changelog.heads(start)
3252 # sort the output in rev descending order
3255 # sort the output in rev descending order
3253 return sorted(heads, key=self.changelog.rev, reverse=True)
3256 return sorted(heads, key=self.changelog.rev, reverse=True)
3254
3257
3255 def branchheads(self, branch=None, start=None, closed=False):
3258 def branchheads(self, branch=None, start=None, closed=False):
3256 """return a (possibly filtered) list of heads for the given branch
3259 """return a (possibly filtered) list of heads for the given branch
3257
3260
3258 Heads are returned in topological order, from newest to oldest.
3261 Heads are returned in topological order, from newest to oldest.
3259 If branch is None, use the dirstate branch.
3262 If branch is None, use the dirstate branch.
3260 If start is not None, return only heads reachable from start.
3263 If start is not None, return only heads reachable from start.
3261 If closed is True, return heads that are marked as closed as well.
3264 If closed is True, return heads that are marked as closed as well.
3262 """
3265 """
3263 if branch is None:
3266 if branch is None:
3264 branch = self[None].branch()
3267 branch = self[None].branch()
3265 branches = self.branchmap()
3268 branches = self.branchmap()
3266 if not branches.hasbranch(branch):
3269 if not branches.hasbranch(branch):
3267 return []
3270 return []
3268 # the cache returns heads ordered lowest to highest
3271 # the cache returns heads ordered lowest to highest
3269 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3272 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3270 if start is not None:
3273 if start is not None:
3271 # filter out the heads that cannot be reached from startrev
3274 # filter out the heads that cannot be reached from startrev
3272 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3275 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3273 bheads = [h for h in bheads if h in fbheads]
3276 bheads = [h for h in bheads if h in fbheads]
3274 return bheads
3277 return bheads
3275
3278
3276 def branches(self, nodes):
3279 def branches(self, nodes):
3277 if not nodes:
3280 if not nodes:
3278 nodes = [self.changelog.tip()]
3281 nodes = [self.changelog.tip()]
3279 b = []
3282 b = []
3280 for n in nodes:
3283 for n in nodes:
3281 t = n
3284 t = n
3282 while True:
3285 while True:
3283 p = self.changelog.parents(n)
3286 p = self.changelog.parents(n)
3284 if p[1] != self.nullid or p[0] == self.nullid:
3287 if p[1] != self.nullid or p[0] == self.nullid:
3285 b.append((t, n, p[0], p[1]))
3288 b.append((t, n, p[0], p[1]))
3286 break
3289 break
3287 n = p[0]
3290 n = p[0]
3288 return b
3291 return b
3289
3292
3290 def between(self, pairs):
3293 def between(self, pairs):
3291 r = []
3294 r = []
3292
3295
3293 for top, bottom in pairs:
3296 for top, bottom in pairs:
3294 n, l, i = top, [], 0
3297 n, l, i = top, [], 0
3295 f = 1
3298 f = 1
3296
3299
3297 while n != bottom and n != self.nullid:
3300 while n != bottom and n != self.nullid:
3298 p = self.changelog.parents(n)[0]
3301 p = self.changelog.parents(n)[0]
3299 if i == f:
3302 if i == f:
3300 l.append(n)
3303 l.append(n)
3301 f = f * 2
3304 f = f * 2
3302 n = p
3305 n = p
3303 i += 1
3306 i += 1
3304
3307
3305 r.append(l)
3308 r.append(l)
3306
3309
3307 return r
3310 return r
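# Editorial note (not part of this change): for each (top, bottom) pair,
# between() walks first parents from top and records the node whenever the
# distance i reaches f, doubling f each time. With a chain of length n this
# samples distances 1, 2, 4, 8, ..., i.e. O(log n) nodes per pair, which
# legacy wire-protocol discovery used to bisect history cheaply.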
3308
3311
3309 def checkpush(self, pushop):
3312 def checkpush(self, pushop):
3310 """Extensions can override this function if additional checks have
3313 """Extensions can override this function if additional checks have
3311 to be performed before pushing, or call it if they override the push
3314 to be performed before pushing, or call it if they override the push
3312 command.
3315 command.
3313 """
3316 """
3314
3317
3315 @unfilteredpropertycache
3318 @unfilteredpropertycache
3316 def prepushoutgoinghooks(self):
3319 def prepushoutgoinghooks(self):
3317 """Return util.hooks consists of a pushop with repo, remote, outgoing
3320 """Return util.hooks consists of a pushop with repo, remote, outgoing
3318 methods, which are called before pushing changesets.
3321 methods, which are called before pushing changesets.
3319 """
3322 """
3320 return util.hooks()
3323 return util.hooks()
3321
3324
3322 def pushkey(self, namespace, key, old, new):
3325 def pushkey(self, namespace, key, old, new):
3323 try:
3326 try:
3324 tr = self.currenttransaction()
3327 tr = self.currenttransaction()
3325 hookargs = {}
3328 hookargs = {}
3326 if tr is not None:
3329 if tr is not None:
3327 hookargs.update(tr.hookargs)
3330 hookargs.update(tr.hookargs)
3328 hookargs = pycompat.strkwargs(hookargs)
3331 hookargs = pycompat.strkwargs(hookargs)
3329 hookargs['namespace'] = namespace
3332 hookargs['namespace'] = namespace
3330 hookargs['key'] = key
3333 hookargs['key'] = key
3331 hookargs['old'] = old
3334 hookargs['old'] = old
3332 hookargs['new'] = new
3335 hookargs['new'] = new
3333 self.hook(b'prepushkey', throw=True, **hookargs)
3336 self.hook(b'prepushkey', throw=True, **hookargs)
3334 except error.HookAbort as exc:
3337 except error.HookAbort as exc:
3335 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3338 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3336 if exc.hint:
3339 if exc.hint:
3337 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3340 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3338 return False
3341 return False
3339 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3342 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3340 ret = pushkey.push(self, namespace, key, old, new)
3343 ret = pushkey.push(self, namespace, key, old, new)
3341
3344
3342 def runhook(unused_success):
3345 def runhook(unused_success):
3343 self.hook(
3346 self.hook(
3344 b'pushkey',
3347 b'pushkey',
3345 namespace=namespace,
3348 namespace=namespace,
3346 key=key,
3349 key=key,
3347 old=old,
3350 old=old,
3348 new=new,
3351 new=new,
3349 ret=ret,
3352 ret=ret,
3350 )
3353 )
3351
3354
3352 self._afterlock(runhook)
3355 self._afterlock(runhook)
3353 return ret
3356 return ret
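# Editorial sketch (not part of this change): the 'prepushkey' hook fired
# above can veto an update. A hypothetical hgrc entry rejecting all
# bookmark pushkeys (hook environment variables like $HG_NAMESPACE are
# provided by Mercurial):
#
#     [hooks]
#     prepushkey.no-bookmarks = sh -c 'test "$HG_NAMESPACE" != bookmarks'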
3354
3357
3355 def listkeys(self, namespace):
3358 def listkeys(self, namespace):
3356 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3359 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3357 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3360 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3358 values = pushkey.list(self, namespace)
3361 values = pushkey.list(self, namespace)
3359 self.hook(b'listkeys', namespace=namespace, values=values)
3362 self.hook(b'listkeys', namespace=namespace, values=values)
3360 return values
3363 return values
3361
3364
3362 def debugwireargs(self, one, two, three=None, four=None, five=None):
3365 def debugwireargs(self, one, two, three=None, four=None, five=None):
3363 '''used to test argument passing over the wire'''
3366 '''used to test argument passing over the wire'''
3364 return b"%s %s %s %s %s" % (
3367 return b"%s %s %s %s %s" % (
3365 one,
3368 one,
3366 two,
3369 two,
3367 pycompat.bytestr(three),
3370 pycompat.bytestr(three),
3368 pycompat.bytestr(four),
3371 pycompat.bytestr(four),
3369 pycompat.bytestr(five),
3372 pycompat.bytestr(five),
3370 )
3373 )
3371
3374
3372 def savecommitmessage(self, text):
3375 def savecommitmessage(self, text):
3373 fp = self.vfs(b'last-message.txt', b'wb')
3376 fp = self.vfs(b'last-message.txt', b'wb')
3374 try:
3377 try:
3375 fp.write(text)
3378 fp.write(text)
3376 finally:
3379 finally:
3377 fp.close()
3380 fp.close()
3378 return self.pathto(fp.name[len(self.root) + 1 :])
3381 return self.pathto(fp.name[len(self.root) + 1 :])
3379
3382
3380 def register_wanted_sidedata(self, category):
3383 def register_wanted_sidedata(self, category):
3381 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3384 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3382 # Only revlogv2 repos can want sidedata.
3385 # Only revlogv2 repos can want sidedata.
3383 return
3386 return
3384 self._wanted_sidedata.add(pycompat.bytestr(category))
3387 self._wanted_sidedata.add(pycompat.bytestr(category))
3385
3388
3386 def register_sidedata_computer(
3389 def register_sidedata_computer(
3387 self, kind, category, keys, computer, flags, replace=False
3390 self, kind, category, keys, computer, flags, replace=False
3388 ):
3391 ):
3389 if kind not in revlogconst.ALL_KINDS:
3392 if kind not in revlogconst.ALL_KINDS:
3390 msg = _(b"unexpected revlog kind '%s'.")
3393 msg = _(b"unexpected revlog kind '%s'.")
3391 raise error.ProgrammingError(msg % kind)
3394 raise error.ProgrammingError(msg % kind)
3392 category = pycompat.bytestr(category)
3395 category = pycompat.bytestr(category)
3393 already_registered = category in self._sidedata_computers.get(kind, [])
3396 already_registered = category in self._sidedata_computers.get(kind, [])
3394 if already_registered and not replace:
3397 if already_registered and not replace:
3395 msg = _(
3398 msg = _(
3396 b"cannot register a sidedata computer twice for category '%s'."
3399 b"cannot register a sidedata computer twice for category '%s'."
3397 )
3400 )
3398 raise error.ProgrammingError(msg % category)
3401 raise error.ProgrammingError(msg % category)
3399 if replace and not already_registered:
3402 if replace and not already_registered:
3400 msg = _(
3403 msg = _(
3401 b"cannot replace a sidedata computer that isn't registered "
3404 b"cannot replace a sidedata computer that isn't registered "
3402 b"for category '%s'."
3405 b"for category '%s'."
3403 )
3406 )
3404 raise error.ProgrammingError(msg % category)
3407 raise error.ProgrammingError(msg % category)
3405 self._sidedata_computers.setdefault(kind, {})
3408 self._sidedata_computers.setdefault(kind, {})
3406 self._sidedata_computers[kind][category] = (keys, computer, flags)
3409 self._sidedata_computers[kind][category] = (keys, computer, flags)
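# Editorial sketch (not part of this change): registering a do-nothing
# sidedata computer for changelog revisions. The category, keys and the
# exact computer return convention shown here are illustrative assumptions,
# not the documented API.
#
#     def compute_nothing(repo, store, rev, sidedata):
#         return {}, (0, 0)  # no sidedata, no flags added or removed
#
#     repo.register_sidedata_computer(
#         revlogconst.KIND_CHANGELOG,
#         b'exp-demo',
#         (b'exp-demo',),
#         compute_nothing,
#         0,
#     )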
3407
3410
3408
3411
3409 # used to avoid circular references so destructors work
3412 # used to avoid circular references so destructors work
3410 def aftertrans(files):
3413 def aftertrans(files):
3411 renamefiles = [tuple(t) for t in files]
3414 renamefiles = [tuple(t) for t in files]
3412
3415
3413 def a():
3416 def a():
3414 for vfs, src, dest in renamefiles:
3417 for vfs, src, dest in renamefiles:
3415 # if src and dest refer to the same file, vfs.rename is a no-op,
3418 # if src and dest refer to the same file, vfs.rename is a no-op,
3416 # leaving both src and dest on disk. Delete dest to make sure
3419 # leaving both src and dest on disk. Delete dest to make sure
3417 # the rename cannot be such a no-op.
3420 # the rename cannot be such a no-op.
3418 vfs.tryunlink(dest)
3421 vfs.tryunlink(dest)
3419 try:
3422 try:
3420 vfs.rename(src, dest)
3423 vfs.rename(src, dest)
3421 except OSError: # journal file does not yet exist
3424 except OSError: # journal file does not yet exist
3422 pass
3425 pass
3423
3426
3424 return a
3427 return a
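# Editorial sketch (not part of this change): aftertrans() is meant to be
# passed as a transaction's "after" callback; a hypothetical call site
# (argument order is illustrative):
#
#     tr = transaction.transaction(
#         ui.warn, svfs, vfsmap, b'journal', after=aftertrans(renames)
#     )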
3425
3428
3426
3429
3427 def undoname(fn):
3430 def undoname(fn):
3428 base, name = os.path.split(fn)
3431 base, name = os.path.split(fn)
3429 assert name.startswith(b'journal')
3432 assert name.startswith(b'journal')
3430 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3433 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3431
3434
3432
3435
3433 def instance(ui, path, create, intents=None, createopts=None):
3436 def instance(ui, path, create, intents=None, createopts=None):
3434 localpath = urlutil.urllocalpath(path)
3437 localpath = urlutil.urllocalpath(path)
3435 if create:
3438 if create:
3436 createrepository(ui, localpath, createopts=createopts)
3439 createrepository(ui, localpath, createopts=createopts)
3437
3440
3438 return makelocalrepository(ui, localpath, intents=intents)
3441 return makelocalrepository(ui, localpath, intents=intents)
3439
3442
3440
3443
3441 def islocal(path):
3444 def islocal(path):
3442 return True
3445 return True
3443
3446
3444
3447
3445 def defaultcreateopts(ui, createopts=None):
3448 def defaultcreateopts(ui, createopts=None):
3446 """Populate the default creation options for a repository.
3449 """Populate the default creation options for a repository.
3447
3450
3448 A dictionary of explicitly requested creation options can be passed
3451 A dictionary of explicitly requested creation options can be passed
3449 in. Missing keys will be populated.
3452 in. Missing keys will be populated.
3450 """
3453 """
3451 createopts = dict(createopts or {})
3454 createopts = dict(createopts or {})
3452
3455
3453 if b'backend' not in createopts:
3456 if b'backend' not in createopts:
3454 # experimental config: storage.new-repo-backend
3457 # experimental config: storage.new-repo-backend
3455 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3458 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3456
3459
3457 return createopts
3460 return createopts
3458
3461
3459
3462
3460 def newreporequirements(ui, createopts):
3463 def newreporequirements(ui, createopts):
3461 """Determine the set of requirements for a new local repository.
3464 """Determine the set of requirements for a new local repository.
3462
3465
3463 Extensions can wrap this function to specify custom requirements for
3466 Extensions can wrap this function to specify custom requirements for
3464 new repositories.
3467 new repositories.
3465 """
3468 """
3466 # If the repo is being created from a shared repository, we copy
3469 # If the repo is being created from a shared repository, we copy
3467 # its requirements.
3470 # its requirements.
3468 if b'sharedrepo' in createopts:
3471 if b'sharedrepo' in createopts:
3469 requirements = set(createopts[b'sharedrepo'].requirements)
3472 requirements = set(createopts[b'sharedrepo'].requirements)
3470 if createopts.get(b'sharedrelative'):
3473 if createopts.get(b'sharedrelative'):
3471 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3474 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3472 else:
3475 else:
3473 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3476 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3474
3477
3475 return requirements
3478 return requirements
3476
3479
3477 if b'backend' not in createopts:
3480 if b'backend' not in createopts:
3478 raise error.ProgrammingError(
3481 raise error.ProgrammingError(
3479 b'backend key not present in createopts; '
3482 b'backend key not present in createopts; '
3480 b'was defaultcreateopts() called?'
3483 b'was defaultcreateopts() called?'
3481 )
3484 )
3482
3485
3483 if createopts[b'backend'] != b'revlogv1':
3486 if createopts[b'backend'] != b'revlogv1':
3484 raise error.Abort(
3487 raise error.Abort(
3485 _(
3488 _(
3486 b'unable to determine repository requirements for '
3489 b'unable to determine repository requirements for '
3487 b'storage backend: %s'
3490 b'storage backend: %s'
3488 )
3491 )
3489 % createopts[b'backend']
3492 % createopts[b'backend']
3490 )
3493 )
3491
3494
3492 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3495 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3493 if ui.configbool(b'format', b'usestore'):
3496 if ui.configbool(b'format', b'usestore'):
3494 requirements.add(requirementsmod.STORE_REQUIREMENT)
3497 requirements.add(requirementsmod.STORE_REQUIREMENT)
3495 if ui.configbool(b'format', b'usefncache'):
3498 if ui.configbool(b'format', b'usefncache'):
3496 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3499 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3497 if ui.configbool(b'format', b'dotencode'):
3500 if ui.configbool(b'format', b'dotencode'):
3498 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3501 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3499
3502
3500 compengines = ui.configlist(b'format', b'revlog-compression')
3503 compengines = ui.configlist(b'format', b'revlog-compression')
3501 for compengine in compengines:
3504 for compengine in compengines:
3502 if compengine in util.compengines:
3505 if compengine in util.compengines:
3503 engine = util.compengines[compengine]
3506 engine = util.compengines[compengine]
3504 if engine.available() and engine.revlogheader():
3507 if engine.available() and engine.revlogheader():
3505 break
3508 break
3506 else:
3509 else:
3507 raise error.Abort(
3510 raise error.Abort(
3508 _(
3511 _(
3509 b'compression engines %s defined by '
3512 b'compression engines %s defined by '
3510 b'format.revlog-compression not available'
3513 b'format.revlog-compression not available'
3511 )
3514 )
3512 % b', '.join(b'"%s"' % e for e in compengines),
3515 % b', '.join(b'"%s"' % e for e in compengines),
3513 hint=_(
3516 hint=_(
3514 b'run "hg debuginstall" to list available '
3517 b'run "hg debuginstall" to list available '
3515 b'compression engines'
3518 b'compression engines'
3516 ),
3519 ),
3517 )
3520 )
3518
3521
3519 # zlib is the historical default and doesn't need an explicit requirement.
3522 # zlib is the historical default and doesn't need an explicit requirement.
3520 if compengine == b'zstd':
3523 if compengine == b'zstd':
3521 requirements.add(b'revlog-compression-zstd')
3524 requirements.add(b'revlog-compression-zstd')
3522 elif compengine != b'zlib':
3525 elif compengine != b'zlib':
3523 requirements.add(b'exp-compression-%s' % compengine)
3526 requirements.add(b'exp-compression-%s' % compengine)
3524
3527
3525 if scmutil.gdinitconfig(ui):
3528 if scmutil.gdinitconfig(ui):
3526 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3529 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3527 if ui.configbool(b'format', b'sparse-revlog'):
3530 if ui.configbool(b'format', b'sparse-revlog'):
3528 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3531 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3529
3532
3533 # experimental config: format.exp-dirstate-v2
3534 if ui.configbool(b'format', b'exp-dirstate-v2'):
3535 if dirstate.SUPPORTS_DIRSTATE_V2:
3536 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3537 else:
3538 raise error.Abort(
3539 _(
3540 b"dirstate v2 format requested by config "
3541 b"but not supported (requires Rust extensions)"
3542 )
3543 )
3544
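# Editorial note (not part of this change): the block above is the core of
# this changeset. The new requirement is only added at repo creation time
# when the experimental config is set, e.g. in an hgrc:
#
#     [format]
#     exp-dirstate-v2 = true
#
# On builds without the Rust extensions (dirstate.SUPPORTS_DIRSTATE_V2 is
# False), repo creation aborts rather than silently falling back to v1.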
3530 # experimental config: format.exp-use-copies-side-data-changeset
3545 # experimental config: format.exp-use-copies-side-data-changeset
3531 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3546 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3532 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3547 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3533 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3548 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3534 if ui.configbool(b'experimental', b'treemanifest'):
3549 if ui.configbool(b'experimental', b'treemanifest'):
3535 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3550 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3536
3551
3537 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3552 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3538 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3553 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3539 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3554 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3540
3555
3541 revlogv2 = ui.config(b'experimental', b'revlogv2')
3556 revlogv2 = ui.config(b'experimental', b'revlogv2')
3542 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3557 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3543 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3558 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3544 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3559 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3545 # experimental config: format.internal-phase
3560 # experimental config: format.internal-phase
3546 if ui.configbool(b'format', b'internal-phase'):
3561 if ui.configbool(b'format', b'internal-phase'):
3547 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3562 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3548
3563
3549 if createopts.get(b'narrowfiles'):
3564 if createopts.get(b'narrowfiles'):
3550 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3565 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3551
3566
3552 if createopts.get(b'lfs'):
3567 if createopts.get(b'lfs'):
3553 requirements.add(b'lfs')
3568 requirements.add(b'lfs')
3554
3569
3555 if ui.configbool(b'format', b'bookmarks-in-store'):
3570 if ui.configbool(b'format', b'bookmarks-in-store'):
3556 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3571 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3557
3572
3558 if ui.configbool(b'format', b'use-persistent-nodemap'):
3573 if ui.configbool(b'format', b'use-persistent-nodemap'):
3559 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3574 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3560
3575
3561 # if share-safe is enabled, let's create the new repository with the new
3576 # if share-safe is enabled, let's create the new repository with the new
3562 # requirement
3577 # requirement
3563 if ui.configbool(b'format', b'use-share-safe'):
3578 if ui.configbool(b'format', b'use-share-safe'):
3564 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3579 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3565
3580
3566 return requirements
3581 return requirements
3567
3582
3568
3583
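The requirement-building code above (the tail of newreporequirements) repeats one pattern: each boolean format.* option maps to a requirement string that will be persisted in .hg/requires. A simplified, hypothetical illustration of that config-to-requirement mapping (option and requirement names are examples only):

# Hypothetical, simplified sketch of the config -> requirement mapping
# performed by newreporequirements above.
_BOOL_OPTIONS = {
    'format.sparse-revlog': 'sparserevlog',
    'format.use-persistent-nodemap': 'persistent-nodemap',
    'format.use-share-safe': 'share-safe',
}

def requirements_from_config(config):
    reqs = set()
    for option, requirement in sorted(_BOOL_OPTIONS.items()):
        if config.get(option, False):
            reqs.add(requirement)
    return reqs

assert requirements_from_config({'format.use-share-safe': True}) == {'share-safe'}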
3569 def checkrequirementscompat(ui, requirements):
3584 def checkrequirementscompat(ui, requirements):
3570 """Checks compatibility of repository requirements enabled and disabled.
3585 """Checks compatibility of repository requirements enabled and disabled.
3571
3586
3572 Returns a set of requirements which needs to be dropped because dependend
3587 Returns a set of requirements which needs to be dropped because dependend
3573 requirements are not enabled. Also warns users about it"""
3588 requirements are not enabled. Also warns users about it"""
3574
3589
3575 dropped = set()
3590 dropped = set()
3576
3591
3577 if requirementsmod.STORE_REQUIREMENT not in requirements:
3592 if requirementsmod.STORE_REQUIREMENT not in requirements:
3578 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3593 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3579 ui.warn(
3594 ui.warn(
3580 _(
3595 _(
3581 b'ignoring enabled \'format.bookmarks-in-store\' config '
3596 b'ignoring enabled \'format.bookmarks-in-store\' config '
3582 b'because it is incompatible with disabled '
3597 b'because it is incompatible with disabled '
3583 b'\'format.usestore\' config\n'
3598 b'\'format.usestore\' config\n'
3584 )
3599 )
3585 )
3600 )
3586 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3601 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3587
3602
3588 if (
3603 if (
3589 requirementsmod.SHARED_REQUIREMENT in requirements
3604 requirementsmod.SHARED_REQUIREMENT in requirements
3590 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3605 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3591 ):
3606 ):
3592 raise error.Abort(
3607 raise error.Abort(
3593 _(
3608 _(
3594 b"cannot create shared repository as source was created"
3609 b"cannot create shared repository as source was created"
3595 b" with 'format.usestore' config disabled"
3610 b" with 'format.usestore' config disabled"
3596 )
3611 )
3597 )
3612 )
3598
3613
3599 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3614 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3600 ui.warn(
3615 ui.warn(
3601 _(
3616 _(
3602 b"ignoring enabled 'format.use-share-safe' config because "
3617 b"ignoring enabled 'format.use-share-safe' config because "
3603 b"it is incompatible with disabled 'format.usestore'"
3618 b"it is incompatible with disabled 'format.usestore'"
3604 b" config\n"
3619 b" config\n"
3605 )
3620 )
3606 )
3621 )
3607 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3622 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3608
3623
3609 return dropped
3624 return dropped
3610
3625
3611
3626
3612 def filterknowncreateopts(ui, createopts):
3627 def filterknowncreateopts(ui, createopts):
3613 """Filters a dict of repo creation options against options that are known.
3628 """Filters a dict of repo creation options against options that are known.
3614
3629
3615 Receives a dict of repo creation options and returns a dict of those
3630 Receives a dict of repo creation options and returns a dict of those
3616 options that we don't know how to handle.
3631 options that we don't know how to handle.
3617
3632
3618 This function is called as part of repository creation. If the
3633 This function is called as part of repository creation. If the
3619 returned dict contains any items, repository creation will not
3634 returned dict contains any items, repository creation will not
3620 be allowed, as it means there was a request to create a repository
3635 be allowed, as it means there was a request to create a repository
3621 with options not recognized by loaded code.
3636 with options not recognized by loaded code.
3622
3637
3623 Extensions can wrap this function to filter out creation options
3638 Extensions can wrap this function to filter out creation options
3624 they know how to handle.
3639 they know how to handle.
3625 """
3640 """
3626 known = {
3641 known = {
3627 b'backend',
3642 b'backend',
3628 b'lfs',
3643 b'lfs',
3629 b'narrowfiles',
3644 b'narrowfiles',
3630 b'sharedrepo',
3645 b'sharedrepo',
3631 b'sharedrelative',
3646 b'sharedrelative',
3632 b'shareditems',
3647 b'shareditems',
3633 b'shallowfilestore',
3648 b'shallowfilestore',
3634 }
3649 }
3635
3650
3636 return {k: v for k, v in createopts.items() if k not in known}
3651 return {k: v for k, v in createopts.items() if k not in known}
3637
3652
3638
3653
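As the docstring above notes, extensions can wrap filterknowncreateopts to claim creation options they handle. A hedged sketch of such a wrapper, assuming the usual extensions.wrapfunction mechanism (the b'myopt' option is invented for illustration):

# Hypothetical extension snippet: claim a custom creation option so
# repository creation is not rejected. b'myopt' is invented here.
from mercurial import extensions, localrepo

def _filterknowncreateopts(orig, ui, createopts):
    unknown = orig(ui, createopts)
    unknown.pop(b'myopt', None)  # we know how to handle this one
    return unknown

def uisetup(ui):
    extensions.wrapfunction(
        localrepo, 'filterknowncreateopts', _filterknowncreateopts
    )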
3639 def createrepository(ui, path, createopts=None):
3654 def createrepository(ui, path, createopts=None):
3640 """Create a new repository in a vfs.
3655 """Create a new repository in a vfs.
3641
3656
3642 ``path`` path to the new repo's working directory.
3657 ``path`` path to the new repo's working directory.
3643 ``createopts`` options for the new repository.
3658 ``createopts`` options for the new repository.
3644
3659
3645 The following keys for ``createopts`` are recognized:
3660 The following keys for ``createopts`` are recognized:
3646
3661
3647 backend
3662 backend
3648 The storage backend to use.
3663 The storage backend to use.
3649 lfs
3664 lfs
3650 Repository will be created with ``lfs`` requirement. The lfs extension
3665 Repository will be created with ``lfs`` requirement. The lfs extension
3651 will automatically be loaded when the repository is accessed.
3666 will automatically be loaded when the repository is accessed.
3652 narrowfiles
3667 narrowfiles
3653 Set up repository to support narrow file storage.
3668 Set up repository to support narrow file storage.
3654 sharedrepo
3669 sharedrepo
3655 Repository object from which storage should be shared.
3670 Repository object from which storage should be shared.
3656 sharedrelative
3671 sharedrelative
3657 Boolean indicating if the path to the shared repo should be
3672 Boolean indicating if the path to the shared repo should be
3658 stored as relative. By default, the pointer to the "parent" repo
3673 stored as relative. By default, the pointer to the "parent" repo
3659 is stored as an absolute path.
3674 is stored as an absolute path.
3660 shareditems
3675 shareditems
3661 Set of items to share to the new repository (in addition to storage).
3676 Set of items to share to the new repository (in addition to storage).
3662 shallowfilestore
3677 shallowfilestore
3663 Indicates that storage for files should be shallow (not all ancestor
3678 Indicates that storage for files should be shallow (not all ancestor
3664 revisions are known).
3679 revisions are known).
3665 """
3680 """
3666 createopts = defaultcreateopts(ui, createopts=createopts)
3681 createopts = defaultcreateopts(ui, createopts=createopts)
3667
3682
3668 unknownopts = filterknowncreateopts(ui, createopts)
3683 unknownopts = filterknowncreateopts(ui, createopts)
3669
3684
3670 if not isinstance(unknownopts, dict):
3685 if not isinstance(unknownopts, dict):
3671 raise error.ProgrammingError(
3686 raise error.ProgrammingError(
3672 b'filterknowncreateopts() did not return a dict'
3687 b'filterknowncreateopts() did not return a dict'
3673 )
3688 )
3674
3689
3675 if unknownopts:
3690 if unknownopts:
3676 raise error.Abort(
3691 raise error.Abort(
3677 _(
3692 _(
3678 b'unable to create repository because of unknown '
3693 b'unable to create repository because of unknown '
3679 b'creation option: %s'
3694 b'creation option: %s'
3680 )
3695 )
3681 % b', '.join(sorted(unknownopts)),
3696 % b', '.join(sorted(unknownopts)),
3682 hint=_(b'is a required extension not loaded?'),
3697 hint=_(b'is a required extension not loaded?'),
3683 )
3698 )
3684
3699
3685 requirements = newreporequirements(ui, createopts=createopts)
3700 requirements = newreporequirements(ui, createopts=createopts)
3686 requirements -= checkrequirementscompat(ui, requirements)
3701 requirements -= checkrequirementscompat(ui, requirements)
3687
3702
3688 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3703 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3689
3704
3690 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3705 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3691 if hgvfs.exists():
3706 if hgvfs.exists():
3692 raise error.RepoError(_(b'repository %s already exists') % path)
3707 raise error.RepoError(_(b'repository %s already exists') % path)
3693
3708
3694 if b'sharedrepo' in createopts:
3709 if b'sharedrepo' in createopts:
3695 sharedpath = createopts[b'sharedrepo'].sharedpath
3710 sharedpath = createopts[b'sharedrepo'].sharedpath
3696
3711
3697 if createopts.get(b'sharedrelative'):
3712 if createopts.get(b'sharedrelative'):
3698 try:
3713 try:
3699 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3714 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3700 sharedpath = util.pconvert(sharedpath)
3715 sharedpath = util.pconvert(sharedpath)
3701 except (IOError, ValueError) as e:
3716 except (IOError, ValueError) as e:
3702 # ValueError is raised on Windows if the drive letters differ
3717 # ValueError is raised on Windows if the drive letters differ
3703 # on each path.
3718 # on each path.
3704 raise error.Abort(
3719 raise error.Abort(
3705 _(b'cannot calculate relative path'),
3720 _(b'cannot calculate relative path'),
3706 hint=stringutil.forcebytestr(e),
3721 hint=stringutil.forcebytestr(e),
3707 )
3722 )
3708
3723
3709 if not wdirvfs.exists():
3724 if not wdirvfs.exists():
3710 wdirvfs.makedirs()
3725 wdirvfs.makedirs()
3711
3726
3712 hgvfs.makedir(notindexed=True)
3727 hgvfs.makedir(notindexed=True)
3713 if b'sharedrepo' not in createopts:
3728 if b'sharedrepo' not in createopts:
3714 hgvfs.mkdir(b'cache')
3729 hgvfs.mkdir(b'cache')
3715 hgvfs.mkdir(b'wcache')
3730 hgvfs.mkdir(b'wcache')
3716
3731
3717 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3732 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3718 if has_store and b'sharedrepo' not in createopts:
3733 if has_store and b'sharedrepo' not in createopts:
3719 hgvfs.mkdir(b'store')
3734 hgvfs.mkdir(b'store')
3720
3735
3721 # We create an invalid changelog outside the store so very old
3736 # We create an invalid changelog outside the store so very old
3722 # Mercurial versions (which didn't know about the requirements
3737 # Mercurial versions (which didn't know about the requirements
3723 # file) encounter an error on reading the changelog. This
3738 # file) encounter an error on reading the changelog. This
3724 # effectively locks out old clients and prevents them from
3739 # effectively locks out old clients and prevents them from
3725 # mucking with a repo in an unknown format.
3740 # mucking with a repo in an unknown format.
3726 #
3741 #
3727 # The revlog header has version 65535, which won't be recognized by
3742 # The revlog header has version 65535, which won't be recognized by
3728 # such old clients.
3743 # such old clients.
3729 hgvfs.append(
3744 hgvfs.append(
3730 b'00changelog.i',
3745 b'00changelog.i',
3731 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3746 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3732 b'layout',
3747 b'layout',
3733 )
3748 )
3734
3749
3735 # Filter the requirements into working copy and store ones
3750 # Filter the requirements into working copy and store ones
3736 wcreq, storereq = scmutil.filterrequirements(requirements)
3751 wcreq, storereq = scmutil.filterrequirements(requirements)
3737 # write working copy ones
3752 # write working copy ones
3738 scmutil.writerequires(hgvfs, wcreq)
3753 scmutil.writerequires(hgvfs, wcreq)
3739 # If there are store requirements and the current repository
3754 # If there are store requirements and the current repository
3740 # is not a shared one, write the store requirements.
3755 # is not a shared one, write the store requirements.
3741 # For a new shared repository, we don't need to write the store
3756 # For a new shared repository, we don't need to write the store
3742 # requirements as they are already present in the source's store requires.
3757 # requirements as they are already present in the source's store requires.
3743 if storereq and b'sharedrepo' not in createopts:
3758 if storereq and b'sharedrepo' not in createopts:
3744 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3759 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3745 scmutil.writerequires(storevfs, storereq)
3760 scmutil.writerequires(storevfs, storereq)
3746
3761
3747 # Write out file telling readers where to find the shared store.
3762 # Write out file telling readers where to find the shared store.
3748 if b'sharedrepo' in createopts:
3763 if b'sharedrepo' in createopts:
3749 hgvfs.write(b'sharedpath', sharedpath)
3764 hgvfs.write(b'sharedpath', sharedpath)
3750
3765
3751 if createopts.get(b'shareditems'):
3766 if createopts.get(b'shareditems'):
3752 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3767 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3753 hgvfs.write(b'shared', shared)
3768 hgvfs.write(b'shared', shared)
3754
3769
3755
3770
3756 def poisonrepository(repo):
3771 def poisonrepository(repo):
3757 """Poison a repository instance so it can no longer be used."""
3772 """Poison a repository instance so it can no longer be used."""
3758 # Perform any cleanup on the instance.
3773 # Perform any cleanup on the instance.
3759 repo.close()
3774 repo.close()
3760
3775
3761 # Our strategy is to replace the type of the object with one that
3776 # Our strategy is to replace the type of the object with one that
3762 # has all attribute lookups result in error.
3777 # has all attribute lookups result in error.
3763 #
3778 #
3764 # But we have to allow the close() method because some constructors
3779 # But we have to allow the close() method because some constructors
3765 # of repos call close() on repo references.
3780 # of repos call close() on repo references.
3766 class poisonedrepository(object):
3781 class poisonedrepository(object):
3767 def __getattribute__(self, item):
3782 def __getattribute__(self, item):
3768 if item == 'close':
3783 if item == 'close':
3769 return object.__getattribute__(self, item)
3784 return object.__getattribute__(self, item)
3770
3785
3771 raise error.ProgrammingError(
3786 raise error.ProgrammingError(
3772 b'repo instances should not be used after unshare'
3787 b'repo instances should not be used after unshare'
3773 )
3788 )
3774
3789
3775 def close(self):
3790 def close(self):
3776 pass
3791 pass
3777
3792
3778 # We may have a repoview, which intercepts __setattr__. So be sure
3793 # We may have a repoview, which intercepts __setattr__. So be sure
3779 # we operate at the lowest level possible.
3794 # we operate at the lowest level possible.
3780 object.__setattr__(repo, '__class__', poisonedrepository)
3795 object.__setattr__(repo, '__class__', poisonedrepository)
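The poisoning trick above is plain Python rather than anything Mercurial-specific: swapping an instance's __class__ reroutes every subsequent attribute lookup through the replacement type. A standalone demonstration of the idea:

# Standalone demonstration of the __class__-swap poisoning pattern.
class _Poisoned(object):
    def __getattribute__(self, item):
        if item == 'close':
            return object.__getattribute__(self, item)
        raise RuntimeError('instance used after poisoning')

    def close(self):
        pass

class Resource(object):
    def close(self):
        print('closed')

r = Resource()
r.close()                                        # prints 'closed'
object.__setattr__(r, '__class__', _Poisoned)
r.close()                                        # still allowed, now a no-op
# any other attribute access on r now raises RuntimeError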
@@ -1,82 +1,87 b''
1 # requirements.py - objects and functions related to repository requirements
1 # requirements.py - objects and functions related to repository requirements
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 GENERALDELTA_REQUIREMENT = b'generaldelta'
10 GENERALDELTA_REQUIREMENT = b'generaldelta'
11 DOTENCODE_REQUIREMENT = b'dotencode'
11 DOTENCODE_REQUIREMENT = b'dotencode'
12 STORE_REQUIREMENT = b'store'
12 STORE_REQUIREMENT = b'store'
13 FNCACHE_REQUIREMENT = b'fncache'
13 FNCACHE_REQUIREMENT = b'fncache'
14
14
15 DIRSTATE_V2_REQUIREMENT = b'exp-dirstate-v2'
16
15 # When narrowing is finalized and no longer subject to format changes,
17 # When narrowing is finalized and no longer subject to format changes,
16 # we should move this to just "narrow" or similar.
18 # we should move this to just "narrow" or similar.
17 NARROW_REQUIREMENT = b'narrowhg-experimental'
19 NARROW_REQUIREMENT = b'narrowhg-experimental'
18
20
19 # Enables sparse working directory usage
21 # Enables sparse working directory usage
20 SPARSE_REQUIREMENT = b'exp-sparse'
22 SPARSE_REQUIREMENT = b'exp-sparse'
21
23
22 # Enables the internal phase which is used to hide changesets instead
24 # Enables the internal phase which is used to hide changesets instead
23 # of stripping them
25 # of stripping them
24 INTERNAL_PHASE_REQUIREMENT = b'internal-phase'
26 INTERNAL_PHASE_REQUIREMENT = b'internal-phase'
25
27
26 # Stores manifest in Tree structure
28 # Stores manifest in Tree structure
27 TREEMANIFEST_REQUIREMENT = b'treemanifest'
29 TREEMANIFEST_REQUIREMENT = b'treemanifest'
28
30
29 REVLOGV1_REQUIREMENT = b'revlogv1'
31 REVLOGV1_REQUIREMENT = b'revlogv1'
30
32
31 # Increment the sub-version when the revlog v2 format changes to lock out old
33 # Increment the sub-version when the revlog v2 format changes to lock out old
32 # clients.
34 # clients.
33 CHANGELOGV2_REQUIREMENT = b'exp-changelog-v2'
35 CHANGELOGV2_REQUIREMENT = b'exp-changelog-v2'
34
36
35 # Increment the sub-version when the revlog v2 format changes to lock out old
37 # Increment the sub-version when the revlog v2 format changes to lock out old
36 # clients.
38 # clients.
37 REVLOGV2_REQUIREMENT = b'exp-revlogv2.2'
39 REVLOGV2_REQUIREMENT = b'exp-revlogv2.2'
38
40
39 # A repository with the sparserevlog feature will have delta chains that
41 # A repository with the sparserevlog feature will have delta chains that
40 # can spread over a larger span. Sparse reading cuts these large spans into
42 # can spread over a larger span. Sparse reading cuts these large spans into
41 # pieces, so that each piece isn't too big.
43 # pieces, so that each piece isn't too big.
42 # Without the sparserevlog capability, reading from the repository could use
44 # Without the sparserevlog capability, reading from the repository could use
43 # huge amounts of memory, because the whole span would be read at once,
45 # huge amounts of memory, because the whole span would be read at once,
44 # including all the intermediate revisions that aren't pertinent for the chain.
46 # including all the intermediate revisions that aren't pertinent for the chain.
45 # This is why once a repository has enabled sparse-read, it becomes required.
47 # This is why once a repository has enabled sparse-read, it becomes required.
46 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
48 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
47
49
48 # A repository with the copies-sidedata-changeset requirement will store
50 # A repository with the copies-sidedata-changeset requirement will store
49 # copies-related information in the changeset's sidedata.
51 # copies-related information in the changeset's sidedata.
50 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
52 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
51
53
52 # The repository uses a persistent nodemap for the changelog and the manifest.
54 # The repository uses a persistent nodemap for the changelog and the manifest.
53 NODEMAP_REQUIREMENT = b'persistent-nodemap'
55 NODEMAP_REQUIREMENT = b'persistent-nodemap'
54
56
55 # Denotes that the current repository is a share
57 # Denotes that the current repository is a share
56 SHARED_REQUIREMENT = b'shared'
58 SHARED_REQUIREMENT = b'shared'
57
59
58 # Denotes that current repository is a share and the shared source path is
60 # Denotes that current repository is a share and the shared source path is
59 # relative to the current repository root path
61 # relative to the current repository root path
60 RELATIVE_SHARED_REQUIREMENT = b'relshared'
62 RELATIVE_SHARED_REQUIREMENT = b'relshared'
61
63
62 # A repository with share implemented safely. The repository has different
64 # A repository with share implemented safely. The repository has different
63 # store and working copy requirements i.e. both `.hg/requires` and
65 # store and working copy requirements i.e. both `.hg/requires` and
64 # `.hg/store/requires` are present.
66 # `.hg/store/requires` are present.
65 SHARESAFE_REQUIREMENT = b'share-safe'
67 SHARESAFE_REQUIREMENT = b'share-safe'
66
68
67 # List of requirements which are working directory specific
69 # List of requirements which are working directory specific
68 # These requirements cannot be shared between repositories if they
70 # These requirements cannot be shared between repositories if they
69 # share the same store
71 # share the same store
70 # * sparse is a working directory specific functionality and hence working
72 # * sparse is a working directory specific functionality and hence working
71 # directory specific requirement
73 # directory specific requirement
72 # * SHARED_REQUIREMENT and RELATIVE_SHARED_REQUIREMENT are requirements which
74 # * SHARED_REQUIREMENT and RELATIVE_SHARED_REQUIREMENT are requirements which
73 # represent that the current working copy/repository shares the store of another
75 # represent that the current working copy/repository shares the store of another
74 # repo. Hence both of them should be stored in the working copy
76 # repo. Hence both of them should be stored in the working copy
75 # * SHARESAFE_REQUIREMENT needs to be stored in the working dir to mark that the
77 # * SHARESAFE_REQUIREMENT needs to be stored in the working dir to mark that the
76 # rest of the requirements are stored in the store's requires
78 # rest of the requirements are stored in the store's requires
79 # * DIRSTATE_V2_REQUIREMENT affects .hg/dirstate, of which there is one per
80 # working directory.
77 WORKING_DIR_REQUIREMENTS = {
81 WORKING_DIR_REQUIREMENTS = {
78 SPARSE_REQUIREMENT,
82 SPARSE_REQUIREMENT,
79 SHARED_REQUIREMENT,
83 SHARED_REQUIREMENT,
80 RELATIVE_SHARED_REQUIREMENT,
84 RELATIVE_SHARED_REQUIREMENT,
81 SHARESAFE_REQUIREMENT,
85 SHARESAFE_REQUIREMENT,
86 DIRSTATE_V2_REQUIREMENT,
82 }
87 }
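WORKING_DIR_REQUIREMENTS drives the split that scmutil.filterrequirements performs during repository creation (seen earlier): working-directory-specific entries land in .hg/requires, everything else in .hg/store/requires. A hypothetical sketch of that partitioning:

# Hypothetical sketch of partitioning requirements the way
# scmutil.filterrequirements is used above (simplified).
WORKING_DIR_REQUIREMENTS = {
    b'exp-sparse',
    b'shared',
    b'relshared',
    b'share-safe',
    b'exp-dirstate-v2',
}

def filter_requirements(requirements):
    wc = {r for r in requirements if r in WORKING_DIR_REQUIREMENTS}
    store = requirements - wc
    return wc, store

wc, store = filter_requirements({b'share-safe', b'store', b'revlogv1'})
assert wc == {b'share-safe'} and store == {b'store', b'revlogv1'}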
@@ -1,1051 +1,1052 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from ..i18n import _
10 from ..i18n import _
11 from .. import (
11 from .. import (
12 error,
12 error,
13 localrepo,
13 localrepo,
14 pycompat,
14 pycompat,
15 requirements,
15 requirements,
16 revlog,
16 revlog,
17 util,
17 util,
18 )
18 )
19
19
20 from ..utils import compression
20 from ..utils import compression
21
21
22 if pycompat.TYPE_CHECKING:
22 if pycompat.TYPE_CHECKING:
23 from typing import (
23 from typing import (
24 List,
24 List,
25 Type,
25 Type,
26 )
26 )
27
27
28
28
29 # list of requirements that request a clone of all revlog if added/removed
29 # list of requirements that request a clone of all revlog if added/removed
30 RECLONES_REQUIREMENTS = {
30 RECLONES_REQUIREMENTS = {
31 requirements.GENERALDELTA_REQUIREMENT,
31 requirements.GENERALDELTA_REQUIREMENT,
32 requirements.SPARSEREVLOG_REQUIREMENT,
32 requirements.SPARSEREVLOG_REQUIREMENT,
33 requirements.REVLOGV2_REQUIREMENT,
33 requirements.REVLOGV2_REQUIREMENT,
34 requirements.CHANGELOGV2_REQUIREMENT,
34 requirements.CHANGELOGV2_REQUIREMENT,
35 }
35 }
36
36
37
37
38 def preservedrequirements(repo):
38 def preservedrequirements(repo):
39 return set()
39 return set()
40
40
41
41
42 FORMAT_VARIANT = b'deficiency'
42 FORMAT_VARIANT = b'deficiency'
43 OPTIMISATION = b'optimization'
43 OPTIMISATION = b'optimization'
44
44
45
45
46 class improvement(object):
46 class improvement(object):
47 """Represents an improvement that can be made as part of an upgrade.
47 """Represents an improvement that can be made as part of an upgrade.
48
48
49 The following attributes are defined on each instance:
49 The following attributes are defined on each instance:
50
50
51 name
51 name
52 Machine-readable string uniquely identifying this improvement. It
52 Machine-readable string uniquely identifying this improvement. It
53 will be mapped to an action later in the upgrade process.
53 will be mapped to an action later in the upgrade process.
54
54
55 type
55 type
56 Either ``FORMAT_VARIANT`` or ``OPTIMISATION``.
56 Either ``FORMAT_VARIANT`` or ``OPTIMISATION``.
57 A format variant is where we change the storage format. Not all format
57 A format variant is where we change the storage format. Not all format
58 variant changes are an obvious problem.
58 variant changes are an obvious problem.
59 An optimization is an action (sometimes optional) that
59 An optimization is an action (sometimes optional) that
60 can be taken to further improve the state of the repository.
60 can be taken to further improve the state of the repository.
61
61
62 description
62 description
63 Message intended for humans explaining the improvement in more detail,
63 Message intended for humans explaining the improvement in more detail,
64 including the implications of it. For ``FORMAT_VARIANT`` types, should be
64 including the implications of it. For ``FORMAT_VARIANT`` types, should be
65 worded in the present tense. For ``OPTIMISATION`` types, should be
65 worded in the present tense. For ``OPTIMISATION`` types, should be
66 worded in the future tense.
66 worded in the future tense.
67
67
68 upgrademessage
68 upgrademessage
69 Message intended for humans explaining what an upgrade addressing this
69 Message intended for humans explaining what an upgrade addressing this
70 issue will do. Should be worded in the future tense.
70 issue will do. Should be worded in the future tense.
71
71
72 postupgrademessage
72 postupgrademessage
73 Message intended for humans which will be shown after an upgrade
73 Message intended for humans which will be shown after an upgrade
74 operation in which this improvement was added
74 operation in which this improvement was added
75
75
76 postdowngrademessage
76 postdowngrademessage
77 Message intended for humans which will be shown after an upgrade
77 Message intended for humans which will be shown after an upgrade
78 operation in which this improvement was removed
78 operation in which this improvement was removed
79
79
80 touches_filelogs (bool)
80 touches_filelogs (bool)
81 Whether this improvement touches filelogs
81 Whether this improvement touches filelogs
82
82
83 touches_manifests (bool)
83 touches_manifests (bool)
84 Whether this improvement touches manifests
84 Whether this improvement touches manifests
85
85
86 touches_changelog (bool)
86 touches_changelog (bool)
87 Whether this improvement touches changelog
87 Whether this improvement touches changelog
88
88
89 touches_requirements (bool)
89 touches_requirements (bool)
90 Whether this improvement changes repository requirements
90 Whether this improvement changes repository requirements
91 """
91 """
92
92
93 def __init__(self, name, type, description, upgrademessage):
93 def __init__(self, name, type, description, upgrademessage):
94 self.name = name
94 self.name = name
95 self.type = type
95 self.type = type
96 self.description = description
96 self.description = description
97 self.upgrademessage = upgrademessage
97 self.upgrademessage = upgrademessage
98 self.postupgrademessage = None
98 self.postupgrademessage = None
99 self.postdowngrademessage = None
99 self.postdowngrademessage = None
100 # By default for now, we assume every improvement touches
100 # By default for now, we assume every improvement touches
101 # all the things
101 # all the things
102 self.touches_filelogs = True
102 self.touches_filelogs = True
103 self.touches_manifests = True
103 self.touches_manifests = True
104 self.touches_changelog = True
104 self.touches_changelog = True
105 self.touches_requirements = True
105 self.touches_requirements = True
106
106
107 def __eq__(self, other):
107 def __eq__(self, other):
108 if not isinstance(other, improvement):
108 if not isinstance(other, improvement):
109 # This is what Python tells us to do
109 # This is what Python tells us to do
110 return NotImplemented
110 return NotImplemented
111 return self.name == other.name
111 return self.name == other.name
112
112
113 def __ne__(self, other):
113 def __ne__(self, other):
114 return not (self == other)
114 return not (self == other)
115
115
116 def __hash__(self):
116 def __hash__(self):
117 return hash(self.name)
117 return hash(self.name)
118
118
119
119
120 allformatvariant = [] # type: List[Type['formatvariant']]
120 allformatvariant = [] # type: List[Type['formatvariant']]
121
121
122
122
123 def registerformatvariant(cls):
123 def registerformatvariant(cls):
124 allformatvariant.append(cls)
124 allformatvariant.append(cls)
125 return cls
125 return cls
126
126
127
127
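registerformatvariant above is the classic decorator-based registry: the decorator records the class in a module-level list and returns it unchanged, so decorated classes behave exactly as if undecorated. A self-contained illustration:

# Self-contained illustration of the decorator-based registry pattern
# used by registerformatvariant (and register_optimization further down).
REGISTRY = []

def register(cls):
    REGISTRY.append(cls)
    return cls  # returned unchanged, so the decorator is transparent

@register
class SomeVariant(object):
    name = 'some-variant'

assert REGISTRY[0] is SomeVariant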
128 class formatvariant(improvement):
128 class formatvariant(improvement):
129 """an improvement subclass dedicated to repository format"""
129 """an improvement subclass dedicated to repository format"""
130
130
131 type = FORMAT_VARIANT
131 type = FORMAT_VARIANT
132 ### The following attributes should be defined for each class:
132 ### The following attributes should be defined for each class:
133
133
134 # machine-readable string uniquely identifying this improvement. it will be
134 # machine-readable string uniquely identifying this improvement. it will be
135 # mapped to an action later in the upgrade process.
135 # mapped to an action later in the upgrade process.
136 name = None
136 name = None
137
137
138 # message intended for humans explaining the improvement in more detail,
138 # message intended for humans explaining the improvement in more detail,
139 # including the implications of it. For ``FORMAT_VARIANT`` types, it
139 # including the implications of it. For ``FORMAT_VARIANT`` types, it
140 # should be worded
140 # should be worded
141 # in the present tense.
141 # in the present tense.
142 description = None
142 description = None
143
143
144 # message intended for humans explaining what an upgrade addressing this
144 # message intended for humans explaining what an upgrade addressing this
145 # issue will do. should be worded in the future tense.
145 # issue will do. should be worded in the future tense.
146 upgrademessage = None
146 upgrademessage = None
147
147
148 # value of current Mercurial default for new repository
148 # value of current Mercurial default for new repository
149 default = None
149 default = None
150
150
151 # Message intended for humans which will be shown after an upgrade
151 # Message intended for humans which will be shown after an upgrade
152 # operation in which this improvement was added
152 # operation in which this improvement was added
153 postupgrademessage = None
153 postupgrademessage = None
154
154
155 # Message intended for humans which will be shown after an upgrade
155 # Message intended for humans which will be shown after an upgrade
156 # operation in which this improvement was removed
156 # operation in which this improvement was removed
157 postdowngrademessage = None
157 postdowngrademessage = None
158
158
159 # By default for now, we assume every improvement touches all the things
159 # By default for now, we assume every improvement touches all the things
160 touches_filelogs = True
160 touches_filelogs = True
161 touches_manifests = True
161 touches_manifests = True
162 touches_changelog = True
162 touches_changelog = True
163 touches_requirements = True
163 touches_requirements = True
164
164
165 def __init__(self):
165 def __init__(self):
166 raise NotImplementedError()
166 raise NotImplementedError()
167
167
168 @staticmethod
168 @staticmethod
169 def fromrepo(repo):
169 def fromrepo(repo):
170 """current value of the variant in the repository"""
170 """current value of the variant in the repository"""
171 raise NotImplementedError()
171 raise NotImplementedError()
172
172
173 @staticmethod
173 @staticmethod
174 def fromconfig(repo):
174 def fromconfig(repo):
175 """current value of the variant in the configuration"""
175 """current value of the variant in the configuration"""
176 raise NotImplementedError()
176 raise NotImplementedError()
177
177
178
178
179 class requirementformatvariant(formatvariant):
179 class requirementformatvariant(formatvariant):
180 """formatvariant based on a 'requirement' name.
180 """formatvariant based on a 'requirement' name.
181
181
182 Many format variants are controlled by a 'requirement'. We define a small
182 Many format variants are controlled by a 'requirement'. We define a small
183 subclass to factor the code.
183 subclass to factor the code.
184 """
184 """
185
185
186 # the requirement that controls this format variant
186 # the requirement that controls this format variant
187 _requirement = None
187 _requirement = None
188
188
189 @staticmethod
189 @staticmethod
190 def _newreporequirements(ui):
190 def _newreporequirements(ui):
191 return localrepo.newreporequirements(
191 return localrepo.newreporequirements(
192 ui, localrepo.defaultcreateopts(ui)
192 ui, localrepo.defaultcreateopts(ui)
193 )
193 )
194
194
195 @classmethod
195 @classmethod
196 def fromrepo(cls, repo):
196 def fromrepo(cls, repo):
197 assert cls._requirement is not None
197 assert cls._requirement is not None
198 return cls._requirement in repo.requirements
198 return cls._requirement in repo.requirements
199
199
200 @classmethod
200 @classmethod
201 def fromconfig(cls, repo):
201 def fromconfig(cls, repo):
202 assert cls._requirement is not None
202 assert cls._requirement is not None
203 return cls._requirement in cls._newreporequirements(repo.ui)
203 return cls._requirement in cls._newreporequirements(repo.ui)
204
204
205
205
206 @registerformatvariant
206 @registerformatvariant
207 class fncache(requirementformatvariant):
207 class fncache(requirementformatvariant):
208 name = b'fncache'
208 name = b'fncache'
209
209
210 _requirement = requirements.FNCACHE_REQUIREMENT
210 _requirement = requirements.FNCACHE_REQUIREMENT
211
211
212 default = True
212 default = True
213
213
214 description = _(
214 description = _(
215 b'long and reserved filenames may not work correctly; '
215 b'long and reserved filenames may not work correctly; '
216 b'repository performance is sub-optimal'
216 b'repository performance is sub-optimal'
217 )
217 )
218
218
219 upgrademessage = _(
219 upgrademessage = _(
220 b'repository will be more resilient to storing '
220 b'repository will be more resilient to storing '
221 b'certain paths and performance of certain '
221 b'certain paths and performance of certain '
222 b'operations should be improved'
222 b'operations should be improved'
223 )
223 )
224
224
225
225
226 @registerformatvariant
226 @registerformatvariant
227 class dotencode(requirementformatvariant):
227 class dotencode(requirementformatvariant):
228 name = b'dotencode'
228 name = b'dotencode'
229
229
230 _requirement = requirements.DOTENCODE_REQUIREMENT
230 _requirement = requirements.DOTENCODE_REQUIREMENT
231
231
232 default = True
232 default = True
233
233
234 description = _(
234 description = _(
235 b'storage of filenames beginning with a period or '
235 b'storage of filenames beginning with a period or '
236 b'space may not work correctly'
236 b'space may not work correctly'
237 )
237 )
238
238
239 upgrademessage = _(
239 upgrademessage = _(
240 b'repository will be better able to store files '
240 b'repository will be better able to store files '
241 b'beginning with a space or period'
241 b'beginning with a space or period'
242 )
242 )
243
243
244
244
245 @registerformatvariant
245 @registerformatvariant
246 class generaldelta(requirementformatvariant):
246 class generaldelta(requirementformatvariant):
247 name = b'generaldelta'
247 name = b'generaldelta'
248
248
249 _requirement = requirements.GENERALDELTA_REQUIREMENT
249 _requirement = requirements.GENERALDELTA_REQUIREMENT
250
250
251 default = True
251 default = True
252
252
253 description = _(
253 description = _(
254 b'deltas within internal storage are unable to '
254 b'deltas within internal storage are unable to '
255 b'choose optimal revisions; repository is larger and '
255 b'choose optimal revisions; repository is larger and '
256 b'slower than it could be; interaction with other '
256 b'slower than it could be; interaction with other '
257 b'repositories may require extra network and CPU '
257 b'repositories may require extra network and CPU '
258 b'resources, making "hg push" and "hg pull" slower'
258 b'resources, making "hg push" and "hg pull" slower'
259 )
259 )
260
260
261 upgrademessage = _(
261 upgrademessage = _(
262 b'repository storage will be able to create '
262 b'repository storage will be able to create '
263 b'optimal deltas; new repository data will be '
263 b'optimal deltas; new repository data will be '
264 b'smaller and read times should decrease; '
264 b'smaller and read times should decrease; '
265 b'interacting with other repositories using this '
265 b'interacting with other repositories using this '
266 b'storage model should require less network and '
266 b'storage model should require less network and '
267 b'CPU resources, making "hg push" and "hg pull" '
267 b'CPU resources, making "hg push" and "hg pull" '
268 b'faster'
268 b'faster'
269 )
269 )
270
270
271
271
272 @registerformatvariant
272 @registerformatvariant
273 class sharesafe(requirementformatvariant):
273 class sharesafe(requirementformatvariant):
274 name = b'share-safe'
274 name = b'share-safe'
275 _requirement = requirements.SHARESAFE_REQUIREMENT
275 _requirement = requirements.SHARESAFE_REQUIREMENT
276
276
277 default = False
277 default = False
278
278
279 description = _(
279 description = _(
280 b'old shared repositories do not share source repository '
280 b'old shared repositories do not share source repository '
281 b'requirements and config. This leads to various problems '
281 b'requirements and config. This leads to various problems '
282 b'when the source repository format is upgraded or some new '
282 b'when the source repository format is upgraded or some new '
283 b'extensions are enabled.'
283 b'extensions are enabled.'
284 )
284 )
285
285
286 upgrademessage = _(
286 upgrademessage = _(
287 b'Upgrades a repository to share-safe format so that future '
287 b'Upgrades a repository to share-safe format so that future '
288 b'shares of this repository share its requirements and configs.'
288 b'shares of this repository share its requirements and configs.'
289 )
289 )
290
290
291 postdowngrademessage = _(
291 postdowngrademessage = _(
292 b'repository downgraded to not use share safe mode, '
292 b'repository downgraded to not use share safe mode, '
293 b'existing shares will not work and need to'
293 b'existing shares will not work and need to'
294 b' be reshared.'
294 b' be reshared.'
295 )
295 )
296
296
297 postupgrademessage = _(
297 postupgrademessage = _(
298 b'repository upgraded to share safe mode, existing'
298 b'repository upgraded to share safe mode, existing'
299 b' shares will still work in old non-safe mode. '
299 b' shares will still work in old non-safe mode. '
300 b'Re-share existing shares to use them in safe mode.'
300 b'Re-share existing shares to use them in safe mode.'
301 b' New shares will be created in safe mode.'
301 b' New shares will be created in safe mode.'
302 )
302 )
303
303
304 # upgrade only needs to change the requirements
304 # upgrade only needs to change the requirements
305 touches_filelogs = False
305 touches_filelogs = False
306 touches_manifests = False
306 touches_manifests = False
307 touches_changelog = False
307 touches_changelog = False
308 touches_requirements = True
308 touches_requirements = True
309
309
310
310
311 @registerformatvariant
311 @registerformatvariant
312 class sparserevlog(requirementformatvariant):
312 class sparserevlog(requirementformatvariant):
313 name = b'sparserevlog'
313 name = b'sparserevlog'
314
314
315 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
315 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
316
316
317 default = True
317 default = True
318
318
319 description = _(
319 description = _(
320 b'in order to limit disk reading and memory usage on older '
320 b'in order to limit disk reading and memory usage on older '
321 b'versions, the span of a delta chain from its root to its '
321 b'versions, the span of a delta chain from its root to its '
322 b'end is limited, regardless of the relevant data in this span. '
322 b'end is limited, regardless of the relevant data in this span. '
323 b'This can severely limit the ability of Mercurial to build good '
323 b'This can severely limit the ability of Mercurial to build good '
324 b'chains of deltas, resulting in much more storage space being '
324 b'chains of deltas, resulting in much more storage space being '
325 b'taken and limited reusability of on-disk deltas during '
325 b'taken and limited reusability of on-disk deltas during '
326 b'exchange.'
326 b'exchange.'
327 )
327 )
328
328
329 upgrademessage = _(
329 upgrademessage = _(
330 b'Revlog supports delta chain with more unused data '
330 b'Revlog supports delta chain with more unused data '
331 b'between payload. These gaps will be skipped at read '
331 b'between payload. These gaps will be skipped at read '
332 b'time. This allows for better delta chains, making a '
332 b'time. This allows for better delta chains, making a '
333 b'better compression and faster exchange with server.'
333 b'better compression and faster exchange with server.'
334 )
334 )
335
335
336
336
337 @registerformatvariant
337 @registerformatvariant
338 class persistentnodemap(requirementformatvariant):
338 class persistentnodemap(requirementformatvariant):
339 name = b'persistent-nodemap'
339 name = b'persistent-nodemap'
340
340
341 _requirement = requirements.NODEMAP_REQUIREMENT
341 _requirement = requirements.NODEMAP_REQUIREMENT
342
342
343 default = False
343 default = False
344
344
345 description = _(
345 description = _(
346 b'persist the node -> rev mapping on disk to speed up lookup'
346 b'persist the node -> rev mapping on disk to speed up lookup'
347 )
347 )
348
348
349 upgrademessage = _(b'Speed up revision lookup by node id.')
349 upgrademessage = _(b'Speed up revision lookup by node id.')
350
350
351
351
352 @registerformatvariant
352 @registerformatvariant
353 class copiessdc(requirementformatvariant):
353 class copiessdc(requirementformatvariant):
354 name = b'copies-sdc'
354 name = b'copies-sdc'
355
355
356 _requirement = requirements.COPIESSDC_REQUIREMENT
356 _requirement = requirements.COPIESSDC_REQUIREMENT
357
357
358 default = False
358 default = False
359
359
360 description = _(b'Stores copies information alongside changesets.')
360 description = _(b'Stores copies information alongside changesets.')
361
361
362 upgrademessage = _(
362 upgrademessage = _(
363 b'Allows the use of a more efficient algorithm to deal with ' b'copy tracing.'
363 b'Allows the use of a more efficient algorithm to deal with ' b'copy tracing.'
364 )
364 )
365
365
366
366
367 @registerformatvariant
367 @registerformatvariant
368 class revlogv2(requirementformatvariant):
368 class revlogv2(requirementformatvariant):
369 name = b'revlog-v2'
369 name = b'revlog-v2'
370 _requirement = requirements.REVLOGV2_REQUIREMENT
370 _requirement = requirements.REVLOGV2_REQUIREMENT
371 default = False
371 default = False
372 description = _(b'Version 2 of the revlog.')
372 description = _(b'Version 2 of the revlog.')
373 upgrademessage = _(b'very experimental')
373 upgrademessage = _(b'very experimental')
374
374
375
375
376 @registerformatvariant
376 @registerformatvariant
377 class changelogv2(requirementformatvariant):
377 class changelogv2(requirementformatvariant):
378 name = b'changelog-v2'
378 name = b'changelog-v2'
379 _requirement = requirements.CHANGELOGV2_REQUIREMENT
379 _requirement = requirements.CHANGELOGV2_REQUIREMENT
380 default = False
380 default = False
381 description = _(b'An iteration of the revlog focussed on changelog needs.')
381 description = _(b'An iteration of the revlog focussed on changelog needs.')
382 upgrademessage = _(b'quite experimental')
382 upgrademessage = _(b'quite experimental')
383
383
384
384
385 @registerformatvariant
385 @registerformatvariant
386 class removecldeltachain(formatvariant):
386 class removecldeltachain(formatvariant):
387 name = b'plain-cl-delta'
387 name = b'plain-cl-delta'
388
388
389 default = True
389 default = True
390
390
391 description = _(
391 description = _(
392 b'changelog storage is using deltas instead of '
392 b'changelog storage is using deltas instead of '
393 b'raw entries; changelog reading and any '
393 b'raw entries; changelog reading and any '
394 b'operation relying on changelog data are slower '
394 b'operation relying on changelog data are slower '
395 b'than they could be'
395 b'than they could be'
396 )
396 )
397
397
398 upgrademessage = _(
398 upgrademessage = _(
399 b'changelog storage will be reformatted to '
399 b'changelog storage will be reformatted to '
400 b'store raw entries; changelog reading will be '
400 b'store raw entries; changelog reading will be '
401 b'faster; changelog size may be reduced'
401 b'faster; changelog size may be reduced'
402 )
402 )
403
403
404 @staticmethod
404 @staticmethod
405 def fromrepo(repo):
405 def fromrepo(repo):
406 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
406 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
407 # changelogs with deltas.
407 # changelogs with deltas.
408 cl = repo.changelog
408 cl = repo.changelog
409 chainbase = cl.chainbase
409 chainbase = cl.chainbase
410 return all(rev == chainbase(rev) for rev in cl)
410 return all(rev == chainbase(rev) for rev in cl)
411
411
412 @staticmethod
412 @staticmethod
413 def fromconfig(repo):
413 def fromconfig(repo):
414 return True
414 return True
415
415
416
416
417 _has_zstd = (
417 _has_zstd = (
418 b'zstd' in util.compengines
418 b'zstd' in util.compengines
419 and util.compengines[b'zstd'].available()
419 and util.compengines[b'zstd'].available()
420 and util.compengines[b'zstd'].revlogheader()
420 and util.compengines[b'zstd'].revlogheader()
421 )
421 )
422
422
423
423
424 @registerformatvariant
424 @registerformatvariant
425 class compressionengine(formatvariant):
425 class compressionengine(formatvariant):
426 name = b'compression'
426 name = b'compression'
427
427
428 if _has_zstd:
428 if _has_zstd:
429 default = b'zstd'
429 default = b'zstd'
430 else:
430 else:
431 default = b'zlib'
431 default = b'zlib'
432
432
433 description = _(
433 description = _(
434 b'Compression algorithm used to compress data. '
434 b'Compression algorithm used to compress data. '
435 b'Some engines are faster than others'
435 b'Some engines are faster than others'
436 )
436 )
437
437
438 upgrademessage = _(
438 upgrademessage = _(
439 b'revlog content will be recompressed with the new algorithm.'
439 b'revlog content will be recompressed with the new algorithm.'
440 )
440 )
441
441
442 @classmethod
442 @classmethod
443 def fromrepo(cls, repo):
443 def fromrepo(cls, repo):
444 # we allow multiple compression engine requirements to co-exist because,
444 # we allow multiple compression engine requirements to co-exist because,
445 # strictly speaking, revlog seems to support mixed compression styles.
445 # strictly speaking, revlog seems to support mixed compression styles.
446 #
446 #
447 # The compression used for new entries will be "the last one"
447 # The compression used for new entries will be "the last one"
448 compression = b'zlib'
448 compression = b'zlib'
449 for req in repo.requirements:
449 for req in repo.requirements:
450 prefix = req.startswith
450 prefix = req.startswith
451 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
451 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
452 compression = req.split(b'-', 2)[2]
452 compression = req.split(b'-', 2)[2]
453 return compression
453 return compression
454
454
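fromrepo above derives the engine name from the requirement string's suffix; as its comment notes, when several compression requirements coexist the last one scanned wins. A standalone illustration of the suffix parsing:

# Standalone illustration of deriving the engine name from a
# requirement string, as fromrepo does above.
def engine_from_requirement(req):
    if req.startswith(b'revlog-compression-') or req.startswith(
        b'exp-compression-'
    ):
        # b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd'
        return req.split(b'-', 2)[2]
    return None

assert engine_from_requirement(b'revlog-compression-zstd') == b'zstd'
assert engine_from_requirement(b'exp-compression-none') == b'none'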
455 @classmethod
455 @classmethod
456 def fromconfig(cls, repo):
456 def fromconfig(cls, repo):
457 compengines = repo.ui.configlist(b'format', b'revlog-compression')
457 compengines = repo.ui.configlist(b'format', b'revlog-compression')
458 # return the first valid value as the selection code would do
458 # return the first valid value as the selection code would do
459 for comp in compengines:
459 for comp in compengines:
460 if comp in util.compengines:
460 if comp in util.compengines:
461 e = util.compengines[comp]
461 e = util.compengines[comp]
462 if e.available() and e.revlogheader():
462 if e.available() and e.revlogheader():
463 return comp
463 return comp
464
464
465 # no valid compression found; let's display it all for clarity
465 # no valid compression found; let's display it all for clarity
466 return b','.join(compengines)
466 return b','.join(compengines)
467
467
468
468
469 @registerformatvariant
469 @registerformatvariant
470 class compressionlevel(formatvariant):
470 class compressionlevel(formatvariant):
471 name = b'compression-level'
471 name = b'compression-level'
472 default = b'default'
472 default = b'default'
473
473
474 description = _(b'compression level')
474 description = _(b'compression level')
475
475
476 upgrademessage = _(b'revlog content will be recompressed')
476 upgrademessage = _(b'revlog content will be recompressed')
477
477
478 @classmethod
478 @classmethod
479 def fromrepo(cls, repo):
479 def fromrepo(cls, repo):
480 comp = compressionengine.fromrepo(repo)
480 comp = compressionengine.fromrepo(repo)
481 level = None
481 level = None
482 if comp == b'zlib':
482 if comp == b'zlib':
483 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
483 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
484 elif comp == b'zstd':
484 elif comp == b'zstd':
485 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
485 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
486 if level is None:
486 if level is None:
487 return b'default'
487 return b'default'
488 return bytes(level)
488 return bytes(level)
489
489
490 @classmethod
490 @classmethod
491 def fromconfig(cls, repo):
491 def fromconfig(cls, repo):
492 comp = compressionengine.fromconfig(repo)
492 comp = compressionengine.fromconfig(repo)
493 level = None
493 level = None
494 if comp == b'zlib':
494 if comp == b'zlib':
495 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
495 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
496 elif comp == b'zstd':
496 elif comp == b'zstd':
497 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
497 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
498 if level is None:
498 if level is None:
499 return b'default'
499 return b'default'
500 return bytes(level)
500 return bytes(level)
501
501
502
502
503 def find_format_upgrades(repo):
503 def find_format_upgrades(repo):
504 """returns a list of format upgrades which can be perform on the repo"""
504 """returns a list of format upgrades which can be perform on the repo"""
505 upgrades = []
505 upgrades = []
506
506
507 # We could detect lack of revlogv1 and store here, but they were added
507 # We could detect lack of revlogv1 and store here, but they were added
508 # in 0.9.2 and we don't support upgrading repos without these
508 # in 0.9.2 and we don't support upgrading repos without these
509 # requirements, so let's not bother.
509 # requirements, so let's not bother.
510
510
511 for fv in allformatvariant:
511 for fv in allformatvariant:
512 if not fv.fromrepo(repo):
512 if not fv.fromrepo(repo):
513 upgrades.append(fv)
513 upgrades.append(fv)
514
514
515 return upgrades
515 return upgrades
516
516
517
517
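find_format_upgrades above and find_format_downgrades below classify variants by comparing repository state (fromrepo) against configured state (fromconfig): anything the repository lacks is an upgrade candidate, and anything present but disabled in config is a downgrade (compression gets special handling below). A hypothetical helper summarizing that logic:

# Hypothetical helper summarizing the classification used by
# find_format_upgrades/find_format_downgrades (compression aside).
def classify(in_repo, in_config):
    if not in_repo:
        return 'upgrade candidate'   # repo lacks the variant
    if in_repo and not in_config:
        return 'downgrade'           # present, but disabled in config
    return 'unchanged'

assert classify(False, True) == 'upgrade candidate'
assert classify(True, False) == 'downgrade'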
518 def find_format_downgrades(repo):
518 def find_format_downgrades(repo):
519 """returns a list of format downgrades which will be performed on the repo
519 """returns a list of format downgrades which will be performed on the repo
520 because of disabled config options for them"""
520 because of disabled config options for them"""
521
521
522 downgrades = []
522 downgrades = []
523
523
524 for fv in allformatvariant:
524 for fv in allformatvariant:
525 if fv.name == b'compression':
525 if fv.name == b'compression':
526 # If there is a compression change between repository
526 # If there is a compression change between repository
527 # and config, destination repository compression will change
527 # and config, destination repository compression will change
528 # and current compression will be removed.
528 # and current compression will be removed.
529 if fv.fromrepo(repo) != fv.fromconfig(repo):
529 if fv.fromrepo(repo) != fv.fromconfig(repo):
530 downgrades.append(fv)
530 downgrades.append(fv)
531 continue
531 continue
532 # format variant exists in repo but does not exist in the new repository
532 # format variant exists in repo but does not exist in the new repository
533 # config
533 # config
534 if fv.fromrepo(repo) and not fv.fromconfig(repo):
534 if fv.fromrepo(repo) and not fv.fromconfig(repo):
535 downgrades.append(fv)
535 downgrades.append(fv)
536
536
537 return downgrades
537 return downgrades


ALL_OPTIMISATIONS = []


def register_optimization(obj):
    ALL_OPTIMISATIONS.append(obj)
    return obj


register_optimization(
    improvement(
        name=b're-delta-parent',
        type=OPTIMISATION,
        description=_(
            b'deltas within internal storage will be recalculated to '
            b'choose an optimal base revision where this was not '
            b'already done; the size of the repository may shrink and '
            b'various operations may become faster; performing this '
            b'optimization for the first time could slow down upgrade '
            b'execution considerably; subsequent invocations should '
            b'not run noticeably slower'
        ),
        upgrademessage=_(
            b'deltas within internal storage will choose a new '
            b'base revision if needed'
        ),
    )
)

register_optimization(
    improvement(
        name=b're-delta-multibase',
        type=OPTIMISATION,
        description=_(
            b'deltas within internal storage will be recalculated '
            b'against multiple base revisions and the smallest '
            b'difference will be used; the size of the repository may '
            b'shrink significantly when there are many merges; this '
            b'optimization will slow down execution in proportion to '
            b'the number of merges in the repository and the number '
            b'of files in the repository; this slowdown should not '
            b'be significant unless there are tens of thousands of '
            b'files and thousands of merges'
        ),
        upgrademessage=_(
            b'deltas within internal storage will choose an '
            b'optimal delta by computing deltas against multiple '
            b'parents; may slow down execution '
            b'significantly'
        ),
    )
)

register_optimization(
    improvement(
        name=b're-delta-all',
        type=OPTIMISATION,
        description=_(
            b'deltas within internal storage will always be '
            b'recalculated without reusing prior deltas; this will '
            b'likely make execution run several times slower; this '
            b'optimization is typically not needed'
        ),
        upgrademessage=_(
            b'deltas within internal storage will be fully '
            b'recomputed; this will likely drastically slow down '
            b'execution'
        ),
    )
)

register_optimization(
    improvement(
        name=b're-delta-fulladd',
        type=OPTIMISATION,
        description=_(
            b'every revision will be re-added as if it were new '
            b'content. It will go through the full storage '
            b'mechanism, giving extensions a chance to process it '
            b'(eg. lfs). This is similar to "re-delta-all" but even '
            b'slower since more logic is involved.'
        ),
        upgrademessage=_(
            b'each revision will be added as new content to the '
            b'internal storage; this will likely drastically slow '
            b'down execution, but some extensions might need '
            b'it'
        ),
    )
)
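

# Because ``register_optimization`` returns its argument, a registration can
# also be bound to a name, and extensions shipping their own optimizations
# can call it the same way. A hedged sketch follows; the optimization name
# and messages are invented, and the call is left commented out so nothing
# is actually registered here:
#
# register_optimization(
#     improvement(
#         name=b're-delta-example',
#         type=OPTIMISATION,
#         description=_(b'illustrative optimization; not a real one'),
#         upgrademessage=_(b'illustrative upgrade message'),
#     )
# )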


def findoptimizations(repo):
    """Determine the optimisations that could be used during an upgrade"""
    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.
    return list(ALL_OPTIMISATIONS)


def determine_upgrade_actions(
    repo, format_upgrades, optimizations, sourcereqs, destreqs
):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``find_format_upgrades`` and
    ``findoptimizations``, determine the list of upgrade actions that
    will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of upgrade actions.
    """
    newactions = []

    for d in format_upgrades:
        name = d._requirement

        # If the action is a requirement that doesn't show up in the
        # destination requirements, prune the action.
        if name is not None and name not in destreqs:
            continue

        newactions.append(d)

    newactions.extend(o for o in sorted(optimizations) if o not in newactions)

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions
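

# A hedged sketch of a typical call (illustrative only): format upgrades and
# optimizations are computed first, then pruned against the destination
# requirements. ``sourcereqs`` is accepted for symmetry but unused by the
# current implementation, and ``new_requirements`` stands for the computed
# destination requirement set:
#
# upgrade_actions = determine_upgrade_actions(
#     repo,
#     find_format_upgrades(repo),
#     findoptimizations(repo),
#     repo.requirements,
#     new_requirements,
# )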


class UpgradeOperation(object):
    """represent the work to be done during an upgrade"""

    def __init__(
        self,
        ui,
        new_requirements,
        current_requirements,
        upgrade_actions,
        removed_actions,
        revlogs_to_process,
        backup_store,
    ):
        self.ui = ui
        self.new_requirements = new_requirements
        self.current_requirements = current_requirements
        # list of upgrade actions the operation will perform
        self.upgrade_actions = upgrade_actions
        self._upgrade_actions_names = set([a.name for a in upgrade_actions])
        self.removed_actions = removed_actions
        self.revlogs_to_process = revlogs_to_process
        # requirements which will be added by the operation
        self._added_requirements = (
            self.new_requirements - self.current_requirements
        )
        # requirements which will be removed by the operation
        self._removed_requirements = (
            self.current_requirements - self.new_requirements
        )
        # requirements which will be preserved by the operation
        self._preserved_requirements = (
            self.current_requirements & self.new_requirements
        )
        # optimizations which this operation will not perform but which the
        # user may want to enable
        all_optimizations = findoptimizations(None)
        self.unused_optimizations = [
            i for i in all_optimizations if i not in self.upgrade_actions
        ]

        # delta reuse mode of this upgrade operation
        self.delta_reuse_mode = revlog.revlog.DELTAREUSEALWAYS
        if b're-delta-all' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSENEVER
        elif b're-delta-parent' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
        elif b're-delta-multibase' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
        elif b're-delta-fulladd' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSEFULLADD
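        # In summary (a light gloss of the constant names, not an exhaustive
        # specification): re-delta-all maps to DELTAREUSENEVER (never reuse a
        # prior delta), re-delta-parent and re-delta-multibase map to
        # DELTAREUSESAMEREVS (reuse only deltas against the same revisions),
        # re-delta-fulladd maps to DELTAREUSEFULLADD (replay each revision
        # through the full addition path), and the default DELTAREUSEALWAYS
        # reuses existing deltas whenever possible. re-delta-multibase
        # additionally forces deltas against both parents, as set just below.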

        # should this operation force re-delta of both parents
        self.force_re_delta_both_parents = (
            b're-delta-multibase' in self._upgrade_actions_names
        )

        # should this operation create a backup of the store
        self.backup_store = backup_store

        # whether the operation touches the various revlogs at all
        self.touches_filelogs = self._touches_filelogs()
        self.touches_manifests = self._touches_manifests()
        self.touches_changelog = self._touches_changelog()
        # whether the operation touches the requirements file
        self.touches_requirements = self._touches_requirements()
        self.touches_store = (
            self.touches_filelogs
            or self.touches_manifests
            or self.touches_changelog
        )
        # does the operation only touch the repository requirements
        self.requirements_only = (
            self.touches_requirements and not self.touches_store
        )

    def _touches_filelogs(self):
        for a in self.upgrade_actions:
            # in optimisations, we re-process the revlogs again
            if a.type == OPTIMISATION:
                return True
            elif a.touches_filelogs:
                return True
        for a in self.removed_actions:
            if a.touches_filelogs:
                return True
        return False

    def _touches_manifests(self):
        for a in self.upgrade_actions:
            # in optimisations, we re-process the revlogs again
            if a.type == OPTIMISATION:
                return True
            elif a.touches_manifests:
                return True
        for a in self.removed_actions:
            if a.touches_manifests:
                return True
        return False

    def _touches_changelog(self):
        for a in self.upgrade_actions:
            # in optimisations, we re-process the revlogs again
            if a.type == OPTIMISATION:
                return True
            elif a.touches_changelog:
                return True
        for a in self.removed_actions:
            if a.touches_changelog:
                return True
        return False

    def _touches_requirements(self):
        for a in self.upgrade_actions:
            # optimisations only re-process revlogs and do not result
            # in a requirement being added or removed
            if a.type == OPTIMISATION:
                pass
            elif a.touches_requirements:
                return True
        for a in self.removed_actions:
            if a.touches_requirements:
                return True

        return False

    def _write_labeled(self, l, label):
        """
        Utility function to aid writing of a list under one label
        """
        first = True
        for r in sorted(l):
            if not first:
                self.ui.write(b', ')
            self.ui.write(r, label=label)
            first = False

    def print_requirements(self):
        self.ui.write(_(b'requirements\n'))
        self.ui.write(_(b' preserved: '))
        self._write_labeled(
            self._preserved_requirements, "upgrade-repo.requirement.preserved"
        )
        self.ui.write(b'\n')
        if self._removed_requirements:
            self.ui.write(_(b' removed: '))
            self._write_labeled(
                self._removed_requirements, "upgrade-repo.requirement.removed"
            )
            self.ui.write(b'\n')
        if self._added_requirements:
            self.ui.write(_(b' added: '))
            self._write_labeled(
                self._added_requirements, "upgrade-repo.requirement.added"
            )
            self.ui.write(b'\n')
        self.ui.write(b'\n')

    def print_optimisations(self):
        optimisations = [
            a for a in self.upgrade_actions if a.type == OPTIMISATION
        ]
        optimisations.sort(key=lambda a: a.name)
        if optimisations:
            self.ui.write(_(b'optimisations: '))
            self._write_labeled(
                [a.name for a in optimisations],
                "upgrade-repo.optimisation.performed",
            )
            self.ui.write(b'\n\n')

    def print_upgrade_actions(self):
        for a in self.upgrade_actions:
            self.ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))

    def print_affected_revlogs(self):
        if not self.revlogs_to_process:
            self.ui.write(b'no revlogs to process\n')
        else:
            self.ui.write(b'processed revlogs:\n')
            for r in sorted(self.revlogs_to_process):
                self.ui.write(b' - %s\n' % r)
        self.ui.write(b'\n')

    def print_unused_optimizations(self):
        for i in self.unused_optimizations:
            self.ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))

    def has_upgrade_action(self, name):
        """Check whether the upgrade operation will perform this action"""
        return name in self._upgrade_actions_names

    def print_post_op_messages(self):
        """print post upgrade operation warning messages"""
        for a in self.upgrade_actions:
            if a.postupgrademessage is not None:
                self.ui.warn(b'%s\n' % a.postupgrademessage)
        for a in self.removed_actions:
            if a.postdowngrademessage is not None:
                self.ui.warn(b'%s\n' % a.postdowngrademessage)

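# A hedged sketch (illustrative only) of how an ``UpgradeOperation``'s
# precomputed flags are typically consulted; ``upgrade_op`` stands for an
# instance built by the upgrade entry point:
#
# if upgrade_op.requirements_only:
#     ...  # only the requirements file needs rewriting
# elif upgrade_op.touches_store:
#     ...  # the affected revlogs must be cloned and processed
# if upgrade_op.has_upgrade_action(b're-delta-parent'):
#     ...  # deltas are recomputed while cloning (DELTAREUSESAMEREVS)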

### Code checking if a repository can go through the upgrade process at all. ##


def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    return {
        # Introduced in Mercurial 0.9.2.
        requirements.STORE_REQUIREMENT,
    }


def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains any
    requirement in the returned set.
    """
    return {
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        requirements.TREEMANIFEST_REQUIREMENT,
        # This was a precursor to generaldelta and was never enabled by default.
        # It should (hopefully) not exist in the wild.
        b'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        requirements.SHARED_REQUIREMENT,
    }


def check_revlog_version(reqs):
    """Check that the requirements contain at least one Revlog version"""
    all_revlogs = {
        requirements.REVLOGV1_REQUIREMENT,
        requirements.REVLOGV2_REQUIREMENT,
    }
    if not all_revlogs.intersection(reqs):
        msg = _(b'cannot upgrade repository; missing a revlog version')
        raise error.Abort(msg)


def check_source_requirements(repo):
    """Ensure that no existing requirements prevent the repository upgrade"""

    check_revlog_version(repo.requirements)
    required = requiredsourcerequirements(repo)
    missingreqs = required - repo.requirements
    if missingreqs:
        msg = _(b'cannot upgrade repository; requirement missing: %s')
        missingreqs = b', '.join(sorted(missingreqs))
        raise error.Abort(msg % missingreqs)

    blocking = blocksourcerequirements(repo)
    blockingreqs = blocking & repo.requirements
    if blockingreqs:
        m = _(b'cannot upgrade repository; unsupported source requirement: %s')
        blockingreqs = b', '.join(sorted(blockingreqs))
        raise error.Abort(m % blockingreqs)
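

# A hedged convenience sketch (not part of this module's API) showing the
# guard above used as a boolean test instead of an abort:
def _example_can_upgrade(repo):
    """Illustrative only: True if no source requirement blocks an upgrade."""
    try:
        check_source_requirements(repo)
    except error.Abort:
        return False
    return True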


### Verify the validity of the planned requirement changes ####################


def supportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    supported = {
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
        requirements.REVLOGV2_REQUIREMENT,
        requirements.CHANGELOGV2_REQUIREMENT,
        requirements.REVLOGV1_REQUIREMENT,
    }
    for name in compression.compengines:
        engine = compression.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')
    return supported


def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = {
        requirements.DOTENCODE_REQUIREMENT,
        requirements.FNCACHE_REQUIREMENT,
        requirements.GENERALDELTA_REQUIREMENT,
        requirements.REVLOGV1_REQUIREMENT,  # allowed in case of downgrade
        requirements.STORE_REQUIREMENT,
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
        requirements.REVLOGV2_REQUIREMENT,
        requirements.CHANGELOGV2_REQUIREMENT,
        requirements.DIRSTATE_V2_REQUIREMENT,
    }
    for name in compression.compengines:
        engine = compression.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')
    return supported
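

# Per the docstring above, an extension can widen the supported destination
# set by wrapping this function. A hedged sketch of such a hook; the import
# path for this module and the b'exp-myfeature' requirement name are
# assumptions for illustration:
#
# from mercurial import extensions
# from mercurial.upgrade_utils import actions as upgrade_actions_mod
#
# def wrapped(orig, repo):
#     supported = orig(repo)
#     supported.add(b'exp-myfeature')
#     return supported
#
# extensions.wrapfunction(
#     upgrade_actions_mod, 'supporteddestrequirements', wrapped
# )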


def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    supported = {
        requirements.DOTENCODE_REQUIREMENT,
        requirements.FNCACHE_REQUIREMENT,
        requirements.GENERALDELTA_REQUIREMENT,
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
        requirements.REVLOGV1_REQUIREMENT,
        requirements.REVLOGV2_REQUIREMENT,
        requirements.CHANGELOGV2_REQUIREMENT,
    }
    for name in compression.compengines:
        engine = compression.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')
    return supported


def check_requirements_changes(repo, new_reqs):
    old_reqs = repo.requirements
    check_revlog_version(repo.requirements)
    support_removal = supportremovedrequirements(repo)
    no_remove_reqs = old_reqs - new_reqs - support_removal
    if no_remove_reqs:
        msg = _(b'cannot upgrade repository; requirement would be removed: %s')
        no_remove_reqs = b', '.join(sorted(no_remove_reqs))
        raise error.Abort(msg % no_remove_reqs)

    support_addition = allowednewrequirements(repo)
    no_add_reqs = new_reqs - old_reqs - support_addition
    if no_add_reqs:
        m = _(b'cannot upgrade repository; do not support adding requirement: ')
        no_add_reqs = b', '.join(sorted(no_add_reqs))
        raise error.Abort(m + no_add_reqs)

    supported = supporteddestrequirements(repo)
    unsupported_reqs = new_reqs - supported
    if unsupported_reqs:
        msg = _(
            b'cannot upgrade repository; do not support destination '
            b'requirement: %s'
        )
        unsupported_reqs = b', '.join(sorted(unsupported_reqs))
        raise error.Abort(msg % unsupported_reqs)
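

# In short, a requirement change passes the checks above only when every
# removal is covered by ``supportremovedrequirements``, every addition by
# ``allowednewrequirements``, and the resulting set stays within
# ``supporteddestrequirements``. A hedged helper sketch (not part of the
# API) exposing the computed delta:
def _example_requirements_delta(repo, new_reqs):
    """Illustrative only: return (added, removed) requirement sets."""
    old_reqs = repo.requirements
    return (new_reqs - old_reqs, old_reqs - new_reqs)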
1052 raise error.Abort(msg % unsupported_reqs)