sidedatacopies: add a new requirement for storing copies into sidedata...
marmoute
r43407:81efc4a2 default
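A minimal usage sketch (an illustration, not part of the changeset): the option introduced below is experimental and off by default, so the sidedata-copies behaviour described in the commit message would presumably be opted into from an hgrc file along these lines. The item name, default, and experimental flag come from the hunk below; the exact requirement string added to the repository is not visible in this hunk.

[format]
# assumption: opting into the experimental item added by this changeset;
# it defaults to off (default=False in the registration below)
exp-use-copies-side-data-changeset = yes
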
@@ -1,1534 +1,1540 @@
# configitems.py - centralized declaration of configuration options
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools
import re

from . import (
    encoding,
    error,
)


def loadconfigtable(ui, extname, configtable):
    """update config items known to the ui with the extension ones"""
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            msg = b"extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config=b'warn-config')

        knownitems.update(items)


class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
       :name: the official name within the section,
    :default: default value for this item,
      :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, match name using regular expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        self._re = None
        if generic:
            self._re = re.compile(self.name)


class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        baseitem = super(itemregister, self).get(key)
        if baseitem is not None and not baseitem.generic:
            return baseitem

        # search for a matching generic item
        generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
        for item in generics:
            # we use 'match' instead of 'search' to make the matching simpler
            # for people unfamiliar with regular expressions. Having the match
            # rooted at the start of the string produces less surprising
            # results for users writing simple regexes for sub-attributes.
            #
            # For example, using "color\..*" with match produces an
            # unsurprising result, while using search could suddenly match
            # apparently unrelated configuration that happens to contain
            # "color." anywhere. This is a tradeoff where we favor requiring
            # ".*" on some patterns to avoid the need to prefix most patterns
            # with "^". The "^" seems more error prone.
            if item._re.match(key):
                return item

        return None


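# Minimal usage sketch of the register above (illustrative only, not part of
# the upstream file): within one section, an exact non-generic entry wins over
# generic ones, and generic entries are tried in (priority, name) order.
#
#   reg = itemregister()
#   reg[b'mode'] = configitem(b'color', b'mode', default=b'auto')
#   reg[b'.*'] = configitem(b'color', b'.*', default=None, generic=True)
#   reg.get(b'mode')    # -> the exact b'mode' item, no regex involved
#   reg.get(b'branch')  # -> falls through to the generic b'.*' item
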
coreitems = {}


def _register(configtable, *args, **kwargs):
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = b"duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item


# special value for the case where the default is derived from other values
dynamicdefault = object()

# Registering actual config items


def getitemregister(configtable):
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f


coreconfigitem = getitemregister(coreitems)


def _registerdiffopts(section, configprefix=b''):
    coreconfigitem(
        section, configprefix + b'nodates', default=False,
    )
    coreconfigitem(
        section, configprefix + b'showfunc', default=False,
    )
    coreconfigitem(
        section, configprefix + b'unified', default=None,
    )
    coreconfigitem(
        section, configprefix + b'git', default=False,
    )
    coreconfigitem(
        section, configprefix + b'ignorews', default=False,
    )
    coreconfigitem(
        section, configprefix + b'ignorewsamount', default=False,
    )
    coreconfigitem(
        section, configprefix + b'ignoreblanklines', default=False,
    )
    coreconfigitem(
        section, configprefix + b'ignorewseol', default=False,
    )
    coreconfigitem(
        section, configprefix + b'nobinary', default=False,
    )
    coreconfigitem(
        section, configprefix + b'noprefix', default=False,
    )
    coreconfigitem(
        section, configprefix + b'word-diff', default=False,
    )


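# Note (illustrative, not from the upstream file): each
# _registerdiffopts(section=...) call below expands into one registration per
# diff option, named configprefix + option within the given section; e.g. the
# b'annotate' call registers annotate.nodates, annotate.showfunc, and so on.
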
coreconfigitem(
    b'alias', b'.*', default=dynamicdefault, generic=True,
)
coreconfigitem(
    b'auth', b'cookiefile', default=None,
)
_registerdiffopts(section=b'annotate')
# bookmarks.pushing: internal hack for discovery
coreconfigitem(
    b'bookmarks', b'pushing', default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem(
    b'bundle', b'mainreporoot', default=b'',
)
coreconfigitem(
    b'censor', b'policy', default=b'abort', experimental=True,
)
coreconfigitem(
    b'chgserver', b'idletimeout', default=3600,
)
coreconfigitem(
    b'chgserver', b'skiphash', default=False,
)
coreconfigitem(
    b'cmdserver', b'log', default=None,
)
coreconfigitem(
    b'cmdserver', b'max-log-files', default=7,
)
coreconfigitem(
    b'cmdserver', b'max-log-size', default=b'1 MB',
)
coreconfigitem(
    b'cmdserver', b'max-repo-cache', default=0, experimental=True,
)
coreconfigitem(
    b'cmdserver', b'message-encodings', default=list, experimental=True,
)
coreconfigitem(
    b'cmdserver',
    b'track-log',
    default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
)
coreconfigitem(
    b'color', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'color', b'mode', default=b'auto',
)
coreconfigitem(
    b'color', b'pagermode', default=dynamicdefault,
)
_registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
coreconfigitem(
    b'commands', b'commit.post-status', default=False,
)
coreconfigitem(
    b'commands', b'grep.all-files', default=False, experimental=True,
)
coreconfigitem(
    b'commands', b'resolve.confirm', default=False,
)
coreconfigitem(
    b'commands', b'resolve.explicit-re-merge', default=False,
)
coreconfigitem(
    b'commands', b'resolve.mark-check', default=b'none',
)
_registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
coreconfigitem(
    b'commands', b'show.aliasprefix', default=list,
)
coreconfigitem(
    b'commands', b'status.relative', default=False,
)
coreconfigitem(
    b'commands', b'status.skipstates', default=[], experimental=True,
)
coreconfigitem(
    b'commands', b'status.terse', default=b'',
)
coreconfigitem(
    b'commands', b'status.verbose', default=False,
)
coreconfigitem(
    b'commands', b'update.check', default=None,
)
coreconfigitem(
    b'commands', b'update.requiredest', default=False,
)
coreconfigitem(
    b'committemplate', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'convert', b'bzr.saverev', default=True,
)
coreconfigitem(
    b'convert', b'cvsps.cache', default=True,
)
coreconfigitem(
    b'convert', b'cvsps.fuzz', default=60,
)
coreconfigitem(
    b'convert', b'cvsps.logencoding', default=None,
)
coreconfigitem(
    b'convert', b'cvsps.mergefrom', default=None,
)
coreconfigitem(
    b'convert', b'cvsps.mergeto', default=None,
)
coreconfigitem(
    b'convert', b'git.committeractions', default=lambda: [b'messagedifferent'],
)
coreconfigitem(
    b'convert', b'git.extrakeys', default=list,
)
coreconfigitem(
    b'convert', b'git.findcopiesharder', default=False,
)
coreconfigitem(
    b'convert', b'git.remoteprefix', default=b'remote',
)
coreconfigitem(
    b'convert', b'git.renamelimit', default=400,
)
coreconfigitem(
    b'convert', b'git.saverev', default=True,
)
coreconfigitem(
    b'convert', b'git.similarity', default=50,
)
coreconfigitem(
    b'convert', b'git.skipsubmodules', default=False,
)
coreconfigitem(
    b'convert', b'hg.clonebranches', default=False,
)
coreconfigitem(
    b'convert', b'hg.ignoreerrors', default=False,
)
coreconfigitem(
    b'convert', b'hg.preserve-hash', default=False,
)
coreconfigitem(
    b'convert', b'hg.revs', default=None,
)
coreconfigitem(
    b'convert', b'hg.saverev', default=False,
)
coreconfigitem(
    b'convert', b'hg.sourcename', default=None,
)
coreconfigitem(
    b'convert', b'hg.startrev', default=None,
)
coreconfigitem(
    b'convert', b'hg.tagsbranch', default=b'default',
)
coreconfigitem(
    b'convert', b'hg.usebranchnames', default=True,
)
coreconfigitem(
    b'convert', b'ignoreancestorcheck', default=False, experimental=True,
)
coreconfigitem(
    b'convert', b'localtimezone', default=False,
)
coreconfigitem(
    b'convert', b'p4.encoding', default=dynamicdefault,
)
coreconfigitem(
    b'convert', b'p4.startrev', default=0,
)
coreconfigitem(
    b'convert', b'skiptags', default=False,
)
coreconfigitem(
    b'convert', b'svn.debugsvnlog', default=True,
)
coreconfigitem(
    b'convert', b'svn.trunk', default=None,
)
coreconfigitem(
    b'convert', b'svn.tags', default=None,
)
coreconfigitem(
    b'convert', b'svn.branches', default=None,
)
coreconfigitem(
    b'convert', b'svn.startrev', default=0,
)
coreconfigitem(
    b'debug', b'dirstate.delaywrite', default=0,
)
coreconfigitem(
    b'defaults', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'devel', b'all-warnings', default=False,
)
coreconfigitem(
    b'devel', b'bundle2.debug', default=False,
)
coreconfigitem(
    b'devel', b'bundle.delta', default=b'',
)
coreconfigitem(
    b'devel', b'cache-vfs', default=None,
)
coreconfigitem(
    b'devel', b'check-locks', default=False,
)
coreconfigitem(
    b'devel', b'check-relroot', default=False,
)
coreconfigitem(
    b'devel', b'default-date', default=None,
)
coreconfigitem(
    b'devel', b'deprec-warn', default=False,
)
coreconfigitem(
    b'devel', b'disableloaddefaultcerts', default=False,
)
coreconfigitem(
    b'devel', b'warn-empty-changegroup', default=False,
)
coreconfigitem(
    b'devel', b'legacy.exchange', default=list,
)
coreconfigitem(
    b'devel', b'servercafile', default=b'',
)
coreconfigitem(
    b'devel', b'serverexactprotocol', default=b'',
)
coreconfigitem(
    b'devel', b'serverrequirecert', default=False,
)
coreconfigitem(
    b'devel', b'strip-obsmarkers', default=True,
)
coreconfigitem(
    b'devel', b'warn-config', default=None,
)
coreconfigitem(
    b'devel', b'warn-config-default', default=None,
)
coreconfigitem(
    b'devel', b'user.obsmarker', default=None,
)
coreconfigitem(
    b'devel', b'warn-config-unknown', default=None,
)
coreconfigitem(
    b'devel', b'debug.copies', default=False,
)
coreconfigitem(
    b'devel', b'debug.extensions', default=False,
)
coreconfigitem(
    b'devel', b'debug.peer-request', default=False,
)
coreconfigitem(
    b'devel', b'discovery.randomize', default=True,
)
_registerdiffopts(section=b'diff')
coreconfigitem(
    b'email', b'bcc', default=None,
)
coreconfigitem(
    b'email', b'cc', default=None,
)
coreconfigitem(
    b'email', b'charsets', default=list,
)
coreconfigitem(
    b'email', b'from', default=None,
)
coreconfigitem(
    b'email', b'method', default=b'smtp',
)
coreconfigitem(
    b'email', b'reply-to', default=None,
)
coreconfigitem(
    b'email', b'to', default=None,
)
coreconfigitem(
    b'experimental', b'archivemetatemplate', default=dynamicdefault,
)
coreconfigitem(
    b'experimental', b'auto-publish', default=b'publish',
)
coreconfigitem(
    b'experimental', b'bundle-phases', default=False,
)
coreconfigitem(
    b'experimental', b'bundle2-advertise', default=True,
)
coreconfigitem(
    b'experimental', b'bundle2-output-capture', default=False,
)
coreconfigitem(
    b'experimental', b'bundle2.pushback', default=False,
)
coreconfigitem(
    b'experimental', b'bundle2lazylocking', default=False,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.bzip2', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.gzip', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.none', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.zstd', default=None,
)
coreconfigitem(
    b'experimental', b'changegroup3', default=False,
)
coreconfigitem(
    b'experimental', b'cleanup-as-archived', default=False,
)
coreconfigitem(
    b'experimental', b'clientcompressionengines', default=list,
)
coreconfigitem(
    b'experimental', b'copytrace', default=b'on',
)
coreconfigitem(
    b'experimental', b'copytrace.movecandidateslimit', default=100,
)
coreconfigitem(
    b'experimental', b'copytrace.sourcecommitlimit', default=100,
)
coreconfigitem(
    b'experimental', b'copies.read-from', default=b"filelog-only",
)
coreconfigitem(
    b'experimental', b'copies.write-to', default=b'filelog-only',
)
coreconfigitem(
    b'experimental', b'crecordtest', default=None,
)
coreconfigitem(
    b'experimental', b'directaccess', default=False,
)
coreconfigitem(
    b'experimental', b'directaccess.revnums', default=False,
)
coreconfigitem(
    b'experimental', b'editortmpinhg', default=False,
)
coreconfigitem(
    b'experimental', b'evolution', default=list,
)
coreconfigitem(
    b'experimental',
    b'evolution.allowdivergence',
    default=False,
    alias=[(b'experimental', b'allowdivergence')],
)
coreconfigitem(
    b'experimental', b'evolution.allowunstable', default=None,
)
coreconfigitem(
    b'experimental', b'evolution.createmarkers', default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.effect-flags',
    default=True,
    alias=[(b'experimental', b'effect-flags')],
)
coreconfigitem(
    b'experimental', b'evolution.exchange', default=None,
)
coreconfigitem(
    b'experimental', b'evolution.bundle-obsmarker', default=False,
)
coreconfigitem(
    b'experimental', b'log.topo', default=False,
)
coreconfigitem(
    b'experimental', b'evolution.report-instabilities', default=True,
)
coreconfigitem(
    b'experimental', b'evolution.track-operation', default=True,
)
# repo-level config to exclude a revset from visibility
#
# The target use case is to use `share` to expose different subsets of the
# same repository, especially server side. See also `server.view`.
coreconfigitem(
    b'experimental', b'extra-filter-revs', default=None,
)
coreconfigitem(
    b'experimental', b'maxdeltachainspan', default=-1,
)
coreconfigitem(
    b'experimental', b'mergetempdirprefix', default=None,
)
coreconfigitem(
    b'experimental', b'mmapindexthreshold', default=None,
)
coreconfigitem(
    b'experimental', b'narrow', default=False,
)
coreconfigitem(
    b'experimental', b'nonnormalparanoidcheck', default=False,
)
coreconfigitem(
    b'experimental', b'exportableenviron', default=list,
)
coreconfigitem(
    b'experimental', b'extendedheader.index', default=None,
)
coreconfigitem(
    b'experimental', b'extendedheader.similarity', default=False,
)
coreconfigitem(
    b'experimental', b'graphshorten', default=False,
)
coreconfigitem(
    b'experimental', b'graphstyle.parent', default=dynamicdefault,
)
coreconfigitem(
    b'experimental', b'graphstyle.missing', default=dynamicdefault,
)
coreconfigitem(
    b'experimental', b'graphstyle.grandparent', default=dynamicdefault,
)
coreconfigitem(
    b'experimental', b'hook-track-tags', default=False,
)
coreconfigitem(
    b'experimental', b'httppeer.advertise-v2', default=False,
)
coreconfigitem(
    b'experimental', b'httppeer.v2-encoder-order', default=None,
)
coreconfigitem(
    b'experimental', b'httppostargs', default=False,
)
coreconfigitem(
    b'experimental', b'mergedriver', default=None,
)
coreconfigitem(b'experimental', b'nointerrupt', default=False)
coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)

coreconfigitem(
    b'experimental', b'obsmarkers-exchange-debug', default=False,
)
coreconfigitem(
    b'experimental', b'remotenames', default=False,
)
coreconfigitem(
    b'experimental', b'removeemptydirs', default=True,
)
coreconfigitem(
    b'experimental', b'revert.interactive.select-to-keep', default=False,
)
coreconfigitem(
    b'experimental', b'revisions.prefixhexnode', default=False,
)
coreconfigitem(
    b'experimental', b'revlogv2', default=None,
)
coreconfigitem(
    b'experimental', b'revisions.disambiguatewithin', default=None,
)
coreconfigitem(
    b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
)
coreconfigitem(
    b'experimental',
    b'server.manifestdata.recommended-batch-size',
    default=100000,
)
coreconfigitem(
    b'experimental', b'server.stream-narrow-clones', default=False,
)
coreconfigitem(
    b'experimental', b'single-head-per-branch', default=False,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch:account-closed-heads',
    default=False,
)
coreconfigitem(
    b'experimental', b'sshserver.support-v2', default=False,
)
coreconfigitem(
    b'experimental', b'sparse-read', default=False,
)
coreconfigitem(
    b'experimental', b'sparse-read.density-threshold', default=0.50,
)
coreconfigitem(
    b'experimental', b'sparse-read.min-gap-size', default=b'65K',
)
coreconfigitem(
    b'experimental', b'treemanifest', default=False,
)
coreconfigitem(
    b'experimental', b'update.atomic-file', default=False,
)
coreconfigitem(
    b'experimental', b'sshpeer.advertise-v2', default=False,
)
coreconfigitem(
    b'experimental', b'web.apiserver', default=False,
)
coreconfigitem(
    b'experimental', b'web.api.http-v2', default=False,
)
coreconfigitem(
    b'experimental', b'web.api.debugreflect', default=False,
)
coreconfigitem(
    b'experimental', b'worker.wdir-get-thread-safe', default=False,
)
coreconfigitem(
    b'experimental', b'xdiff', default=False,
)
coreconfigitem(
    b'extensions', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'extdata', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'format', b'bookmarks-in-store', default=False,
)
coreconfigitem(
    b'format', b'chunkcachesize', default=None, experimental=True,
)
coreconfigitem(
    b'format', b'dotencode', default=True,
)
coreconfigitem(
    b'format', b'generaldelta', default=False, experimental=True,
)
coreconfigitem(
    b'format', b'manifestcachesize', default=None, experimental=True,
)
coreconfigitem(
    b'format', b'maxchainlen', default=dynamicdefault, experimental=True,
)
coreconfigitem(
    b'format', b'obsstore-version', default=None,
)
coreconfigitem(
    b'format', b'sparse-revlog', default=True,
)
coreconfigitem(
    b'format',
    b'revlog-compression',
    default=b'zlib',
    alias=[(b'experimental', b'format.compression')],
)
coreconfigitem(
    b'format', b'usefncache', default=True,
)
coreconfigitem(
    b'format', b'usegeneraldelta', default=True,
)
coreconfigitem(
    b'format', b'usestore', default=True,
)
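# The item just below is the one added by this changeset: per the commit
# message, it is meant to add a new repository requirement for storing copies
# into sidedata (the requirement string itself is outside this hunk). It is
# experimental and off by default.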
coreconfigitem(
    b'format',
    b'exp-use-copies-side-data-changeset',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format', b'use-side-data', default=False, experimental=True,
)
coreconfigitem(
    b'format', b'internal-phase', default=False, experimental=True,
)
coreconfigitem(
    b'fsmonitor', b'warn_when_unused', default=True,
)
coreconfigitem(
    b'fsmonitor', b'warn_update_file_count', default=50000,
)
coreconfigitem(
    b'help', br'hidden-command\..*', default=False, generic=True,
)
coreconfigitem(
    b'help', br'hidden-topic\..*', default=False, generic=True,
)
coreconfigitem(
    b'hooks', b'.*', default=dynamicdefault, generic=True,
)
coreconfigitem(
    b'hgweb-paths', b'.*', default=list, generic=True,
)
coreconfigitem(
    b'hostfingerprints', b'.*', default=list, generic=True,
)
coreconfigitem(
    b'hostsecurity', b'ciphers', default=None,
)
coreconfigitem(
    b'hostsecurity', b'disabletls10warning', default=False,
)
coreconfigitem(
    b'hostsecurity', b'minimumprotocol', default=dynamicdefault,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:minimumprotocol$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hostsecurity', b'.*:ciphers$', default=dynamicdefault, generic=True,
)
coreconfigitem(
    b'hostsecurity', b'.*:fingerprints$', default=list, generic=True,
)
coreconfigitem(
    b'hostsecurity', b'.*:verifycertsfile$', default=None, generic=True,
)

coreconfigitem(
    b'http_proxy', b'always', default=False,
)
coreconfigitem(
    b'http_proxy', b'host', default=None,
)
coreconfigitem(
    b'http_proxy', b'no', default=list,
)
coreconfigitem(
    b'http_proxy', b'passwd', default=None,
)
coreconfigitem(
    b'http_proxy', b'user', default=None,
)

coreconfigitem(
    b'http', b'timeout', default=None,
)

coreconfigitem(
    b'logtoprocess', b'commandexception', default=None,
)
coreconfigitem(
    b'logtoprocess', b'commandfinish', default=None,
)
coreconfigitem(
    b'logtoprocess', b'command', default=None,
)
coreconfigitem(
    b'logtoprocess', b'develwarn', default=None,
)
coreconfigitem(
    b'logtoprocess', b'uiblocked', default=None,
)
coreconfigitem(
    b'merge', b'checkunknown', default=b'abort',
)
coreconfigitem(
    b'merge', b'checkignored', default=b'abort',
)
coreconfigitem(
    b'experimental', b'merge.checkpathconflicts', default=False,
)
coreconfigitem(
    b'merge', b'followcopies', default=True,
)
coreconfigitem(
    b'merge', b'on-failure', default=b'continue',
)
coreconfigitem(
    b'merge', b'preferancestor', default=lambda: [b'*'], experimental=True,
)
coreconfigitem(
    b'merge', b'strict-capability-check', default=False,
)
coreconfigitem(
    b'merge-tools', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.args$',
    default=b"$local $base $other",
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.binary$', default=False, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.check$', default=list, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.checkchanged$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.executable$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.fixeol$', default=False, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.gui$', default=False, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkers$',
    default=b'basic',
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkertemplate$',
    default=dynamicdefault,  # take from ui.mergemarkertemplate
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.priority$', default=0, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.premerge$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.symlink$', default=False, generic=True, priority=-1,
)
coreconfigitem(
    b'pager', b'attend-.*', default=dynamicdefault, generic=True,
)
coreconfigitem(
    b'pager', b'ignore', default=list,
)
coreconfigitem(
    b'pager', b'pager', default=dynamicdefault,
)
coreconfigitem(
    b'patch', b'eol', default=b'strict',
)
coreconfigitem(
    b'patch', b'fuzz', default=2,
)
coreconfigitem(
    b'paths', b'default', default=None,
)
coreconfigitem(
    b'paths', b'default-push', default=None,
)
coreconfigitem(
    b'paths', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'phases', b'checksubrepos', default=b'follow',
)
coreconfigitem(
    b'phases', b'new-commit', default=b'draft',
)
coreconfigitem(
    b'phases', b'publish', default=True,
)
coreconfigitem(
    b'profiling', b'enabled', default=False,
)
coreconfigitem(
    b'profiling', b'format', default=b'text',
)
coreconfigitem(
    b'profiling', b'freq', default=1000,
)
coreconfigitem(
    b'profiling', b'limit', default=30,
)
coreconfigitem(
    b'profiling', b'nested', default=0,
)
coreconfigitem(
    b'profiling', b'output', default=None,
)
coreconfigitem(
    b'profiling', b'showmax', default=0.999,
)
coreconfigitem(
    b'profiling', b'showmin', default=dynamicdefault,
)
coreconfigitem(
    b'profiling', b'showtime', default=True,
)
coreconfigitem(
    b'profiling', b'sort', default=b'inlinetime',
)
coreconfigitem(
    b'profiling', b'statformat', default=b'hotpath',
)
coreconfigitem(
    b'profiling', b'time-track', default=dynamicdefault,
)
coreconfigitem(
    b'profiling', b'type', default=b'stat',
)
coreconfigitem(
    b'progress', b'assume-tty', default=False,
)
coreconfigitem(
    b'progress', b'changedelay', default=1,
)
coreconfigitem(
    b'progress', b'clear-complete', default=True,
)
coreconfigitem(
    b'progress', b'debug', default=False,
)
coreconfigitem(
    b'progress', b'delay', default=3,
)
coreconfigitem(
    b'progress', b'disable', default=False,
)
coreconfigitem(
    b'progress', b'estimateinterval', default=60.0,
)
coreconfigitem(
    b'progress',
    b'format',
    default=lambda: [b'topic', b'bar', b'number', b'estimate'],
)
coreconfigitem(
    b'progress', b'refresh', default=0.1,
)
coreconfigitem(
    b'progress', b'width', default=dynamicdefault,
)
coreconfigitem(
    b'push', b'pushvars.server', default=False,
)
coreconfigitem(
    b'rewrite',
    b'backup-bundle',
    default=True,
    alias=[(b'ui', b'history-editing-backup')],
)
coreconfigitem(
    b'rewrite', b'update-timestamp', default=False,
)
coreconfigitem(
    b'storage', b'new-repo-backend', default=b'revlogv1', experimental=True,
)
coreconfigitem(
    b'storage',
    b'revlog.optimize-delta-parent-choice',
    default=True,
    alias=[(b'format', b'aggressivemergedeltas')],
)
coreconfigitem(
    b'storage', b'revlog.reuse-external-delta', default=True,
)
coreconfigitem(
    b'storage', b'revlog.reuse-external-delta-parent', default=None,
)
coreconfigitem(
    b'storage', b'revlog.zlib.level', default=None,
)
coreconfigitem(
    b'storage', b'revlog.zstd.level', default=None,
)
coreconfigitem(
    b'server', b'bookmarks-pushkey-compat', default=True,
)
coreconfigitem(
    b'server', b'bundle1', default=True,
)
coreconfigitem(
    b'server', b'bundle1gd', default=None,
)
coreconfigitem(
    b'server', b'bundle1.pull', default=None,
)
coreconfigitem(
    b'server', b'bundle1gd.pull', default=None,
)
coreconfigitem(
    b'server', b'bundle1.push', default=None,
)
coreconfigitem(
    b'server', b'bundle1gd.push', default=None,
)
coreconfigitem(
    b'server',
    b'bundle2.stream',
    default=True,
    alias=[(b'experimental', b'bundle2.stream')],
)
coreconfigitem(
    b'server', b'compressionengines', default=list,
)
coreconfigitem(
    b'server', b'concurrent-push-mode', default=b'strict',
)
coreconfigitem(
    b'server', b'disablefullbundle', default=False,
)
coreconfigitem(
    b'server', b'maxhttpheaderlen', default=1024,
)
coreconfigitem(
    b'server', b'pullbundle', default=False,
)
coreconfigitem(
    b'server', b'preferuncompressed', default=False,
)
coreconfigitem(
    b'server', b'streamunbundle', default=False,
)
coreconfigitem(
    b'server', b'uncompressed', default=True,
)
coreconfigitem(
    b'server', b'uncompressedallowsecret', default=False,
)
coreconfigitem(
    b'server', b'view', default=b'served',
)
coreconfigitem(
    b'server', b'validate', default=False,
)
coreconfigitem(
    b'server', b'zliblevel', default=-1,
1125 b'server', b'zliblevel', default=-1,
1120 )
1126 )
1121 coreconfigitem(
1127 coreconfigitem(
1122 b'server', b'zstdlevel', default=3,
1128 b'server', b'zstdlevel', default=3,
1123 )
1129 )
1124 coreconfigitem(
1130 coreconfigitem(
1125 b'share', b'pool', default=None,
1131 b'share', b'pool', default=None,
1126 )
1132 )
1127 coreconfigitem(
1133 coreconfigitem(
1128 b'share', b'poolnaming', default=b'identity',
1134 b'share', b'poolnaming', default=b'identity',
1129 )
1135 )
1130 coreconfigitem(
1136 coreconfigitem(
1131 b'shelve', b'maxbackups', default=10,
1137 b'shelve', b'maxbackups', default=10,
1132 )
1138 )
1133 coreconfigitem(
1139 coreconfigitem(
1134 b'smtp', b'host', default=None,
1140 b'smtp', b'host', default=None,
1135 )
1141 )
1136 coreconfigitem(
1142 coreconfigitem(
1137 b'smtp', b'local_hostname', default=None,
1143 b'smtp', b'local_hostname', default=None,
1138 )
1144 )
1139 coreconfigitem(
1145 coreconfigitem(
1140 b'smtp', b'password', default=None,
1146 b'smtp', b'password', default=None,
1141 )
1147 )
1142 coreconfigitem(
1148 coreconfigitem(
1143 b'smtp', b'port', default=dynamicdefault,
1149 b'smtp', b'port', default=dynamicdefault,
1144 )
1150 )
1145 coreconfigitem(
1151 coreconfigitem(
1146 b'smtp', b'tls', default=b'none',
1152 b'smtp', b'tls', default=b'none',
1147 )
1153 )
1148 coreconfigitem(
1154 coreconfigitem(
1149 b'smtp', b'username', default=None,
1155 b'smtp', b'username', default=None,
1150 )
1156 )
1151 coreconfigitem(
1157 coreconfigitem(
1152 b'sparse', b'missingwarning', default=True, experimental=True,
1158 b'sparse', b'missingwarning', default=True, experimental=True,
1153 )
1159 )
1154 coreconfigitem(
1160 coreconfigitem(
1155 b'subrepos',
1161 b'subrepos',
1156 b'allowed',
1162 b'allowed',
1157 default=dynamicdefault, # to make backporting simpler
1163 default=dynamicdefault, # to make backporting simpler
1158 )
1164 )
1159 coreconfigitem(
1165 coreconfigitem(
1160 b'subrepos', b'hg:allowed', default=dynamicdefault,
1166 b'subrepos', b'hg:allowed', default=dynamicdefault,
1161 )
1167 )
1162 coreconfigitem(
1168 coreconfigitem(
1163 b'subrepos', b'git:allowed', default=dynamicdefault,
1169 b'subrepos', b'git:allowed', default=dynamicdefault,
1164 )
1170 )
1165 coreconfigitem(
1171 coreconfigitem(
1166 b'subrepos', b'svn:allowed', default=dynamicdefault,
1172 b'subrepos', b'svn:allowed', default=dynamicdefault,
1167 )
1173 )
1168 coreconfigitem(
1174 coreconfigitem(
1169 b'templates', b'.*', default=None, generic=True,
1175 b'templates', b'.*', default=None, generic=True,
1170 )
1176 )
1171 coreconfigitem(
1177 coreconfigitem(
1172 b'templateconfig', b'.*', default=dynamicdefault, generic=True,
1178 b'templateconfig', b'.*', default=dynamicdefault, generic=True,
1173 )
1179 )
coreconfigitem(
    b'trusted', b'groups', default=list,
)
coreconfigitem(
    b'trusted', b'users', default=list,
)
coreconfigitem(
    b'ui', b'_usedassubrepo', default=False,
)
coreconfigitem(
    b'ui', b'allowemptycommit', default=False,
)
coreconfigitem(
    b'ui', b'archivemeta', default=True,
)
coreconfigitem(
    b'ui', b'askusername', default=False,
)
coreconfigitem(
    b'ui', b'clonebundlefallback', default=False,
)
coreconfigitem(
    b'ui', b'clonebundleprefers', default=list,
)
coreconfigitem(
    b'ui', b'clonebundles', default=True,
)
coreconfigitem(
    b'ui', b'color', default=b'auto',
)
coreconfigitem(
    b'ui', b'commitsubrepos', default=False,
)
coreconfigitem(
    b'ui', b'debug', default=False,
)
coreconfigitem(
    b'ui', b'debugger', default=None,
)
coreconfigitem(
    b'ui', b'editor', default=dynamicdefault,
)
coreconfigitem(
    b'ui', b'fallbackencoding', default=None,
)
coreconfigitem(
    b'ui', b'forcecwd', default=None,
)
coreconfigitem(
    b'ui', b'forcemerge', default=None,
)
coreconfigitem(
    b'ui', b'formatdebug', default=False,
)
coreconfigitem(
    b'ui', b'formatjson', default=False,
)
coreconfigitem(
    b'ui', b'formatted', default=None,
)
coreconfigitem(
    b'ui', b'graphnodetemplate', default=None,
)
coreconfigitem(
    b'ui', b'interactive', default=None,
)
coreconfigitem(
    b'ui', b'interface', default=None,
)
coreconfigitem(
    b'ui', b'interface.chunkselector', default=None,
)
coreconfigitem(
    b'ui', b'large-file-limit', default=10000000,
)
coreconfigitem(
    b'ui', b'logblockedtimes', default=False,
)
coreconfigitem(
    b'ui', b'logtemplate', default=None,
)
coreconfigitem(
    b'ui', b'merge', default=None,
)
coreconfigitem(
    b'ui', b'mergemarkers', default=b'basic',
)
coreconfigitem(
    b'ui',
    b'mergemarkertemplate',
    default=(
        b'{node|short} '
        b'{ifeq(tags, "tip", "", '
        b'ifeq(tags, "", "", "{tags} "))}'
        b'{if(bookmarks, "{bookmarks} ")}'
        b'{ifeq(branch, "default", "", "{branch} ")}'
        b'- {author|user}: {desc|firstline}'
    ),
)
coreconfigitem(
    b'ui', b'message-output', default=b'stdio',
)
coreconfigitem(
    b'ui', b'nontty', default=False,
)
coreconfigitem(
    b'ui', b'origbackuppath', default=None,
)
coreconfigitem(
    b'ui', b'paginate', default=True,
)
coreconfigitem(
    b'ui', b'patch', default=None,
)
coreconfigitem(
    b'ui', b'pre-merge-tool-output-template', default=None,
)
coreconfigitem(
    b'ui', b'portablefilenames', default=b'warn',
)
coreconfigitem(
    b'ui', b'promptecho', default=False,
)
coreconfigitem(
    b'ui', b'quiet', default=False,
)
coreconfigitem(
    b'ui', b'quietbookmarkmove', default=False,
)
coreconfigitem(
    b'ui', b'relative-paths', default=b'legacy',
)
coreconfigitem(
    b'ui', b'remotecmd', default=b'hg',
)
coreconfigitem(
    b'ui', b'report_untrusted', default=True,
)
coreconfigitem(
    b'ui', b'rollback', default=True,
)
coreconfigitem(
    b'ui', b'signal-safe-lock', default=True,
)
coreconfigitem(
    b'ui', b'slash', default=False,
)
coreconfigitem(
    b'ui', b'ssh', default=b'ssh',
)
coreconfigitem(
    b'ui', b'ssherrorhint', default=None,
)
coreconfigitem(
    b'ui', b'statuscopies', default=False,
)
coreconfigitem(
    b'ui', b'strict', default=False,
)
coreconfigitem(
    b'ui', b'style', default=b'',
)
coreconfigitem(
    b'ui', b'supportcontact', default=None,
)
coreconfigitem(
    b'ui', b'textwidth', default=78,
)
coreconfigitem(
    b'ui', b'timeout', default=b'600',
)
coreconfigitem(
    b'ui', b'timeout.warn', default=0,
)
coreconfigitem(
    b'ui', b'traceback', default=False,
)
coreconfigitem(
    b'ui', b'tweakdefaults', default=False,
)
coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
coreconfigitem(
    b'ui', b'verbose', default=False,
)
coreconfigitem(
    b'verify', b'skipflags', default=None,
)
coreconfigitem(
    b'web', b'allowbz2', default=False,
)
coreconfigitem(
    b'web', b'allowgz', default=False,
)
coreconfigitem(
    b'web', b'allow-pull', alias=[(b'web', b'allowpull')], default=True,
)
coreconfigitem(
    b'web', b'allow-push', alias=[(b'web', b'allow_push')], default=list,
)
coreconfigitem(
    b'web', b'allowzip', default=False,
)
coreconfigitem(
    b'web', b'archivesubrepos', default=False,
)
coreconfigitem(
    b'web', b'cache', default=True,
)
coreconfigitem(
    b'web', b'comparisoncontext', default=5,
)
coreconfigitem(
    b'web', b'contact', default=None,
)
coreconfigitem(
    b'web', b'deny_push', default=list,
)
coreconfigitem(
    b'web', b'guessmime', default=False,
)
coreconfigitem(
    b'web', b'hidden', default=False,
)
coreconfigitem(
    b'web', b'labels', default=list,
)
coreconfigitem(
    b'web', b'logoimg', default=b'hglogo.png',
)
coreconfigitem(
    b'web', b'logourl', default=b'https://mercurial-scm.org/',
)
coreconfigitem(
    b'web', b'accesslog', default=b'-',
)
coreconfigitem(
    b'web', b'address', default=b'',
)
coreconfigitem(
    b'web', b'allow-archive', alias=[(b'web', b'allow_archive')], default=list,
)
coreconfigitem(
    b'web', b'allow_read', default=list,
)
coreconfigitem(
    b'web', b'baseurl', default=None,
)
coreconfigitem(
    b'web', b'cacerts', default=None,
)
coreconfigitem(
    b'web', b'certificate', default=None,
)
coreconfigitem(
    b'web', b'collapse', default=False,
)
coreconfigitem(
    b'web', b'csp', default=None,
)
coreconfigitem(
    b'web', b'deny_read', default=list,
)
coreconfigitem(
    b'web', b'descend', default=True,
)
coreconfigitem(
    b'web', b'description', default=b"",
)
coreconfigitem(
    b'web', b'encoding', default=lambda: encoding.encoding,
)
coreconfigitem(
    b'web', b'errorlog', default=b'-',
)
coreconfigitem(
    b'web', b'ipv6', default=False,
)
coreconfigitem(
    b'web', b'maxchanges', default=10,
)
coreconfigitem(
    b'web', b'maxfiles', default=10,
)
coreconfigitem(
    b'web', b'maxshortchanges', default=60,
)
coreconfigitem(
    b'web', b'motd', default=b'',
)
coreconfigitem(
    b'web', b'name', default=dynamicdefault,
)
coreconfigitem(
    b'web', b'port', default=8000,
)
coreconfigitem(
    b'web', b'prefix', default=b'',
)
coreconfigitem(
    b'web', b'push_ssl', default=True,
)
coreconfigitem(
    b'web', b'refreshinterval', default=20,
)
coreconfigitem(
    b'web', b'server-header', default=None,
)
coreconfigitem(
    b'web', b'static', default=None,
)
coreconfigitem(
    b'web', b'staticurl', default=None,
)
coreconfigitem(
    b'web', b'stripes', default=1,
)
coreconfigitem(
    b'web', b'style', default=b'paper',
)
coreconfigitem(
    b'web', b'templates', default=None,
)
coreconfigitem(
    b'web', b'view', default=b'served', experimental=True,
)
coreconfigitem(
    b'worker', b'backgroundclose', default=dynamicdefault,
)
# Windows defaults to a limit of 512 open files. A buffer of 128
# should give us enough headway.
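# (512 - 128 = 384, hence the default just below.)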
coreconfigitem(
    b'worker', b'backgroundclosemaxqueue', default=384,
)
coreconfigitem(
    b'worker', b'backgroundcloseminfilecount', default=2048,
)
coreconfigitem(
    b'worker', b'backgroundclosethreadcount', default=4,
)
coreconfigitem(
    b'worker', b'enabled', default=True,
)
coreconfigitem(
    b'worker', b'numcpus', default=None,
)

# Rebase-related configuration moved to core because other extensions are
# doing strange things. For example, shelve imports the rebase extension to
# reuse some bits without formally loading it.
coreconfigitem(
    b'commands', b'rebase.requiredest', default=False,
)
coreconfigitem(
    b'experimental', b'rebaseskipobsolete', default=True,
)
coreconfigitem(
    b'rebase', b'singletransaction', default=False,
)
coreconfigitem(
    b'rebase', b'experimental.inmemory', default=False,
)
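
# Reading sketch (not part of this module; the repository path is
# hypothetical): once declared above, an item is read through a ui object,
# which falls back to the declared default when nothing is configured.
#
#     from mercurial import hg, ui as uimod
#
#     ui = uimod.ui.load()
#     repo = hg.repository(ui, b'/path/to/repo')
#     repo.ui.configbool(b'rebase', b'singletransaction')  # -> False
#     repo.ui.configint(b'web', b'port')  # -> 8000 unless overridden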
@@ -1,3695 +1,3704 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    procutil,
    stringutil,
)

from .revlogutils import constants as revlogconst

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()
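# For instance (illustrative entries; the set is populated by the cache
# classes below), (b'bookmarks', b'plain') refers to a file in .hg/, while
# (b'phaseroots', b'') refers to a file in .hg/store/.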


class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


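# Decorator usage sketch (the property and file names are illustrative): a
# property cached this way is recomputed only when the file backing it
# changes on disk.
#
#     @repofilecache(b'bookmarks')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)

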
class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


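# Usage sketch (``repo`` is assumed to be an open repository object):
#
#     changelog, cached = isfilecached(repo, 'changelog')
#     if cached:
#         ...  # reuse the in-memory changelog without re-reading it

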
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper


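# Usage sketch: repoview filtering hides some revisions, so methods that
# must observe every revision are wrapped to always run on the unfiltered
# repository (the method shown is illustrative):
#
#     @unfilteredmethod
#     def destroyed(self):
#         ...  # ``self`` is the unfiltered repo here

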
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True


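# Calling-convention sketch (``peer`` is assumed to be an object implementing
# the peer interface; the command and argument are illustrative):
#
#     with peer.commandexecutor() as e:
#         node = e.callcommand(b'lookup', {b'key': b'tip'}).result()

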
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(b'clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, missingheads=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, missingheads=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = b'sparserevlog'

# A repository with the sidedataflag requirement will allow storing extra
# information for revisions without altering their original hashes.
SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'

# A repository with the copies-sidedata-changeset requirement will store
# copies-related information in the changeset's sidedata.
COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'

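# Sketch of how such strings are persisted (hypothetical contents): they are
# written to .hg/requires as a newline-delimited list, e.g.:
#
#     revlogv1
#     store
#     exp-copies-sidedata-changeset
#
# A client that does not recognize every listed entry must refuse to open
# the repository.
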
# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
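# Registration sketch (extension-side code; the requirement string is
# hypothetical):
#
#     def featuresetup(ui, features):
#         # declare that this extension can handle repos with this requirement
#         features.add(b'exp-myextension-requirement')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)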


def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we will
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

550 # At this point, we know we should be capable of opening the repository.
554 # At this point, we know we should be capable of opening the repository.
551 # Now get on with doing that.
555 # Now get on with doing that.
552
556
    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(
                _(b'.hg/sharedpath points to nonexistent directory %s')
                % sharedvfs.base
            )

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')
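
    # To make the two sharing modes concrete (hypothetical paths, matching
    # the semantics described in the comment above):
    #
    #   requirement   .hg/sharedpath content   resolved store base
    #   -----------   ----------------------   --------------------------
    #   shared        /home/alice/src/.hg      /home/alice/src/.hg
    #   relshared     ../src/.hg               <this repo>/.hg/../src/.hg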

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The wcache vfs is used to manage cache files related to the working copy.
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False
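
# The docstring above invites monkeypatching. A minimal sketch (hypothetical
# extension code, not part of this module) of an extension pulling in an
# extra, per-repo config file via ``extensions.wrapfunction``; the file name
# ``hgrc-extra`` is an assumption for illustration:
#
#   from mercurial import extensions, localrepo
#
#   def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#       loaded = orig(ui, wdirvfs, hgvfs, requirements)
#       try:
#           ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#           loaded = True
#       except IOError:
#           pass
#       return loaded
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)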


def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # the requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')
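
    # The autoload above is equivalent to the user enabling the extension in
    # a config file themselves (illustrative hgrc snippet):
    #
    #   [extensions]
    #   lfs =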


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported
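
# A sketch (hypothetical extension code) of how an extension can advertise a
# new requirement through ``featuresetupfuncs``, which the loop above will
# invoke when the extension is enabled; the requirement name is an assumption
# for illustration:
#
#   from mercurial import localrepo
#
#   def featuresetup(ui, supported):
#       supported.add(b'exp-myfeature')
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)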


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
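
# For reference, a failed check surfaces to the user as an abort built from
# the message above, along these lines (illustrative output):
#
#   abort: repository requires features unknown to this Mercurial: exp-sparse!
#   (see https://mercurial-scm.org/wiki/MissingRequirement for more information)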


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(
                path, vfstype, b'dotencode' in requirements
            )

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)
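
# Summarizing the dispatch above for quick reference (derived from the code):
#
#   requirements present    store class returned
#   --------------------    ----------------------
#   store + fncache         storemod.fncachestore
#   store only              storemod.encodedstore
#   neither                 storemod.basicstore
#
# ``dotencode`` only matters for fncachestore, where it enables the encoding
# of paths whose components start with a dot or a space.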


def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    writecopiesto = ui.config(b'experimental', b'copies.write-to')
    copiesextramode = (b'changeset-only', b'compatibility')
    if writecopiesto in copiesextramode:
        options[b'copies-storage'] = b'extra'

    return options
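
# An illustrative config driving the ``copies-storage`` option above; either
# of the two values checked in the code routes copy metadata to the changeset
# extras:
#
#   [experimental]
#   copies.write-to = compatibility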


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options
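
# For a typical modern repository (requirements including ``revlogv1``,
# ``generaldelta`` and ``sparserevlog``, default config), the dict assembled
# above would contain, among other entries (illustrative excerpt only):
#
#   {
#       b'revlogv1': True,
#       b'generaldelta': True,
#       b'sparse-revlog': True,
#       ...
#   }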


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
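
# Because the factories above are resolved late (through the lambdas), an
# extension can substitute its own storage type. A minimal sketch
# (hypothetical extension code; ``mycustomfilestorage`` and the requirement
# name are assumptions for illustration):
#
#   from mercurial import extensions, localrepo
#
#   def _makefilestorage(orig, requirements, features, **kwargs):
#       if b'exp-myfeature' in requirements:
#           return mycustomfilestorage
#       return orig(requirements, features, **kwargs)
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'makefilestorage', _makefilestorage)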


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        b'treemanifest',
        COPIESSDC_REQUIREMENT,
        REVLOGV2_REQUIREMENT,
        SIDEDATA_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        b'shared',
        b'relshared',
        b'dotencode',
        b'exp-sparse',
        b'internal-phase',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed, so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }
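
    # A sketch (hypothetical extension code) of extending the list, as the
    # comment above suggests; the prefix is an assumption for illustration:
    #
    #   from mercurial import localrepo
    #   localrepo.localrepository._wlockfreeprefix.add(b'myext-state')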

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator).
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            if path.startswith(b'journal.') or path.startswith(b'undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs
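
    # When ``devel.check-locks`` is enabled (e.g. ``--config
    # devel.check-locks=yes``), the ward above surfaces unlocked writes as
    # develwarn output along these lines (illustrative):
    #
    #   devel-warn: write with no wlock: "bookmarks"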

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository.

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example, calling `repo.filtered("served")` will return a repoview
        using the "served" view, regardless of the initial view used by
        `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
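
    # Typical usage (illustrative): ``repo.filtered(b'visible')`` hides hidden
    # (e.g. obsolete) changesets, while ``repo.filtered(b'served')`` also
    # hides secret ones; the unfiltered repository is always reachable via
    # ``repo.unfiltered()``.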

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks pointing to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before locking
        # is "invalidated" right after the locks are taken. This invalidation
        # is "light": the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing the
        # same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)
1405
1410
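To make the schedule above concrete, here is a minimal sketch of the read/lock pattern it protects. Only ``lock()`` and the ``_bookmarks`` property are real APIs of this class; the surrounding caller is hypothetical:

    # Hypothetical caller, illustrative only.
    marks = repo._bookmarks          # steps (1)-(5): computed and cached
    with repo.lock():                # lock acquisition invalidates stale filecaches
        marks = repo._bookmarks      # reused if bookmarks/changelog are unchanged,
                                     # recomputed against a fresh changelog otherwise
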
    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if b'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but that
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

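As a usage sketch (the pattern and path are invented; ``matchmod.match()`` and ``narrowmatch()`` are the real entry points):

    # Intersect a user-supplied matcher with the narrowspec, keeping exact
    # files so that out-of-narrowspec paths can still be warned about.
    m = matchmod.match(repo.root, b'', [b'glob:src/**'])
    nm = repo.narrowmatch(m, includeexact=True)
    if nm(b'src/main.py'):   # hypothetical path
        ...
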
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'null':
                node = nullid
                rev = nullrev
            elif changeid == b'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from a damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, type(changeid))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

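For orientation, the accepted ``changeid`` forms map to contexts roughly like this (illustrative calls):

    repo[None]      # workingctx for the working directory
    repo[0]         # changectx for revision 0
    repo[b'tip']    # changectx for the repository tip
    repo[b'.']      # first parent of the working directory
    repo[node]      # 20-byte binary node or 40-character hex string
    repo[0:3]       # list of changectx, skipping filtered revisions
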
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        prefix is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

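A brief sketch of the three entry points; the revset expressions here are invented, and the ``%d``/``%s`` escapes are those documented for ``revsetlang.formatspec``:

    for rev in repo.revs(b'ancestors(%d) and not public()', 42):
        ...                                  # integer revisions (smartset)
    for ctx in repo.set(b'heads(branch(%s))', b'default'):
        ...                                  # changectx instances
    revs = repo.anyrevs([b'tip', b'.^'], user=True)  # user aliases expanded
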
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, b'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. We have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = dict(
            [
                (encoding.tolocal(name), value)
                for (name, value) in pycompat.iteritems(tagtypes)
            ]
        )
        return (tags, tagtypes)

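Since ``_findtags()`` is explicitly an extension point, a sketch of wrapping it from an extension could look like the following; the tag name and the 'virtual' tagtype are invented for illustration (the XXX above notes that no tagtype convention exists yet), while ``extensions.wrapfunction`` is the standard wrapping helper:

    from mercurial import extensions, localrepo

    def wrappedfindtags(orig, self):
        tags, tagtypes = orig(self)
        tags[b'last-build'] = self.changelog.tip()  # hypothetical extra tag
        tagtypes[b'last-build'] = b'virtual'        # invented tagtype
        return tags, tagtypes

    def uisetup(ui):
        extensions.wrapfunction(
            localrepo.localrepository, '_findtags', wrappedfindtags
        )
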
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

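``known()`` is the membership primitive used by discovery; a small sketch of its contract:

    # One boolean per node: True only if the node exists in the changelog
    # and is not hidden by the current repoview filter.
    nodes = [repo[b'tip'].node(), b'\x01' * 20]   # second node is made up
    repo.known(nodes)   # -> [True, False] in a non-empty repository
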
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing, we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(b"filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

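For context, the filter tables loaded above are driven by the ``[encode]`` and ``[decode]`` hgrc sections; a configuration in the shape the hgrc documentation describes would be:

    [encode]
    # uncompress gzip files on checkin to improve delta compression
    *.gz = pipe: gunzip

    [decode]
    # recompress gzip files when writing them to the working directory
    *.gz = pipe: gzip
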
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic references between the repo and the transaction
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track this movement from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag, since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
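        #
        # A hypothetical in-process hook consuming this could look like the
        # sketch below; the 'tag_moved' hookarg mirrors HG_TAG_MOVED, the
        # rest is illustrative:
        #
        #   def tagmovehook(ui, repo, **kwargs):
        #       if kwargs.get('tag_moved'):
        #           with repo.vfs(b'changes/tags.changes', b'rb') as fp:
        #               for line in fp:
        #                   action, hexnode, tagname = line.split(b' ', 2)
        #                   ...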
2025 tracktags = lambda x: None
2030 tracktags = lambda x: None
2026 # experimental config: experimental.hook-track-tags
2031 # experimental config: experimental.hook-track-tags
2027 shouldtracktags = self.ui.configbool(
2032 shouldtracktags = self.ui.configbool(
2028 b'experimental', b'hook-track-tags'
2033 b'experimental', b'hook-track-tags'
2029 )
2034 )
2030 if desc != b'strip' and shouldtracktags:
2035 if desc != b'strip' and shouldtracktags:
2031 oldheads = self.changelog.headrevs()
2036 oldheads = self.changelog.headrevs()
2032
2037
2033 def tracktags(tr2):
2038 def tracktags(tr2):
2034 repo = reporef()
2039 repo = reporef()
2035 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2040 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2036 newheads = repo.changelog.headrevs()
2041 newheads = repo.changelog.headrevs()
2037 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2042 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2038 # notes: we compare lists here.
2043 # notes: we compare lists here.
2039 # As we do it only once buiding set would not be cheaper
2044 # As we do it only once buiding set would not be cheaper
2040 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2045 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2041 if changes:
2046 if changes:
2042 tr2.hookargs[b'tag_moved'] = b'1'
2047 tr2.hookargs[b'tag_moved'] = b'1'
2043 with repo.vfs(
2048 with repo.vfs(
2044 b'changes/tags.changes', b'w', atomictemp=True
2049 b'changes/tags.changes', b'w', atomictemp=True
2045 ) as changesfile:
2050 ) as changesfile:
2046 # note: we do not register the file to the transaction
2051 # note: we do not register the file to the transaction
2047 # because we needs it to still exist on the transaction
2052 # because we needs it to still exist on the transaction
2048 # is close (for txnclose hooks)
2053 # is close (for txnclose hooks)
2049 tagsmod.writediff(changesfile, changes)
2054 tagsmod.writediff(changesfile, changes)
2050
2055
2051 def validate(tr2):
2056 def validate(tr2):
2052 """will run pre-closing hooks"""
2057 """will run pre-closing hooks"""
2053 # XXX the transaction API is a bit lacking here so we take a hacky
2058 # XXX the transaction API is a bit lacking here so we take a hacky
2054 # path for now
2059 # path for now
2055 #
2060 #
2056 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2061 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2057 # dict is copied before these run. In addition we needs the data
2062 # dict is copied before these run. In addition we needs the data
2058 # available to in memory hooks too.
2063 # available to in memory hooks too.
2059 #
2064 #
2060 # Moreover, we also need to make sure this runs before txnclose
2065 # Moreover, we also need to make sure this runs before txnclose
2061 # hooks and there is no "pending" mechanism that would execute
2066 # hooks and there is no "pending" mechanism that would execute
2062 # logic only if hooks are about to run.
2067 # logic only if hooks are about to run.
2063 #
2068 #
2064 # Fixing this limitation of the transaction is also needed to track
2069 # Fixing this limitation of the transaction is also needed to track
2065 # other families of changes (bookmarks, phases, obsolescence).
2070 # other families of changes (bookmarks, phases, obsolescence).
2066 #
2071 #
2067 # This will have to be fixed before we remove the experimental
2072 # This will have to be fixed before we remove the experimental
2068 # gating.
2073 # gating.
2069 tracktags(tr2)
2074 tracktags(tr2)
2070 repo = reporef()
2075 repo = reporef()
2071
2076
2072 r = repo.ui.configsuboptions(
2077 r = repo.ui.configsuboptions(
2073 b'experimental', b'single-head-per-branch'
2078 b'experimental', b'single-head-per-branch'
2074 )
2079 )
2075 singlehead, singleheadsub = r
2080 singlehead, singleheadsub = r
2076 if singlehead:
2081 if singlehead:
2077 accountclosed = singleheadsub.get(
2082 accountclosed = singleheadsub.get(
2078 b"account-closed-heads", False
2083 b"account-closed-heads", False
2079 )
2084 )
2080 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2085 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2081 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2086 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2082 for name, (old, new) in sorted(
2087 for name, (old, new) in sorted(
2083 tr.changes[b'bookmarks'].items()
2088 tr.changes[b'bookmarks'].items()
2084 ):
2089 ):
2085 args = tr.hookargs.copy()
2090 args = tr.hookargs.copy()
2086 args.update(bookmarks.preparehookargs(name, old, new))
2091 args.update(bookmarks.preparehookargs(name, old, new))
2087 repo.hook(
2092 repo.hook(
2088 b'pretxnclose-bookmark',
2093 b'pretxnclose-bookmark',
2089 throw=True,
2094 throw=True,
2090 **pycompat.strkwargs(args)
2095 **pycompat.strkwargs(args)
2091 )
2096 )
2092 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2097 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2093 cl = repo.unfiltered().changelog
2098 cl = repo.unfiltered().changelog
2094 for rev, (old, new) in tr.changes[b'phases'].items():
2099 for rev, (old, new) in tr.changes[b'phases'].items():
2095 args = tr.hookargs.copy()
2100 args = tr.hookargs.copy()
2096 node = hex(cl.node(rev))
2101 node = hex(cl.node(rev))
2097 args.update(phases.preparehookargs(node, old, new))
2102 args.update(phases.preparehookargs(node, old, new))
2098 repo.hook(
2103 repo.hook(
2099 b'pretxnclose-phase',
2104 b'pretxnclose-phase',
2100 throw=True,
2105 throw=True,
2101 **pycompat.strkwargs(args)
2106 **pycompat.strkwargs(args)
2102 )
2107 )
2103
2108
2104 repo.hook(
2109 repo.hook(
2105 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2110 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2106 )
2111 )
2107
2112
2108 def releasefn(tr, success):
2113 def releasefn(tr, success):
2109 repo = reporef()
2114 repo = reporef()
2110 if repo is None:
2115 if repo is None:
2111 # If the repo has been GC'd (and this release function is being
2116 # If the repo has been GC'd (and this release function is being
2112 # called from transaction.__del__), there's not much we can do,
2117 # called from transaction.__del__), there's not much we can do,
2113 # so just leave the unfinished transaction there and let the
2118 # so just leave the unfinished transaction there and let the
2114 # user run `hg recover`.
2119 # user run `hg recover`.
2115 return
2120 return
2116 if success:
2121 if success:
2117 # this should be explicitly invoked here, because
2122 # this should be explicitly invoked here, because
2118 # in-memory changes aren't written out at closing
2123 # in-memory changes aren't written out at closing
2119 # transaction, if tr.addfilegenerator (via
2124 # transaction, if tr.addfilegenerator (via
2120 # dirstate.write or so) isn't invoked while
2125 # dirstate.write or so) isn't invoked while
2121 # transaction running
2126 # transaction running
2122 repo.dirstate.write(None)
2127 repo.dirstate.write(None)
2123 else:
2128 else:
2124 # discard all changes (including ones already written
2129 # discard all changes (including ones already written
2125 # out) in this transaction
2130 # out) in this transaction
2126 narrowspec.restorebackup(self, b'journal.narrowspec')
2131 narrowspec.restorebackup(self, b'journal.narrowspec')
2127 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2132 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2128 repo.dirstate.restorebackup(None, b'journal.dirstate')
2133 repo.dirstate.restorebackup(None, b'journal.dirstate')
2129
2134
2130 repo.invalidate(clearfilecache=True)
2135 repo.invalidate(clearfilecache=True)
2131
2136
2132 tr = transaction.transaction(
2137 tr = transaction.transaction(
2133 rp,
2138 rp,
2134 self.svfs,
2139 self.svfs,
2135 vfsmap,
2140 vfsmap,
2136 b"journal",
2141 b"journal",
2137 b"undo",
2142 b"undo",
2138 aftertrans(renames),
2143 aftertrans(renames),
2139 self.store.createmode,
2144 self.store.createmode,
2140 validator=validate,
2145 validator=validate,
2141 releasefn=releasefn,
2146 releasefn=releasefn,
2142 checkambigfiles=_cachedfiles,
2147 checkambigfiles=_cachedfiles,
2143 name=desc,
2148 name=desc,
2144 )
2149 )
2145 tr.changes[b'origrepolen'] = len(self)
2150 tr.changes[b'origrepolen'] = len(self)
2146 tr.changes[b'obsmarkers'] = set()
2151 tr.changes[b'obsmarkers'] = set()
2147 tr.changes[b'phases'] = {}
2152 tr.changes[b'phases'] = {}
2148 tr.changes[b'bookmarks'] = {}
2153 tr.changes[b'bookmarks'] = {}
2149
2154
2150 tr.hookargs[b'txnid'] = txnid
2155 tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes[b'phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'txnclose-phase',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

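    # Illustrative sketch (not part of the module): the 'txnclose' hook
    # scheduled above receives the transaction's hookargs, including the
    # b'txnname' key set at the top of this method. A hypothetical python
    # hook wired up as
    #   [hooks]
    #   txnclose.report = python:myext.report
    # could look like:
    #   def report(ui, repo, **kwargs):
    #       ui.status(b'transaction %s closed\n' % kwargs[r'txnname'])
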
    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

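    # Note (a sketch, assuming the undoname() helper defined earlier in
    # this module simply swaps the 'journal' prefix for 'undo'):
    # undofiles() yields the same vfs/name pairs under the backup names,
    # e.g.
    #   undoname(b'journal.dirstate') == b'undo.dirstate'
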
    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

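    # Minimal usage sketch (roughly what the 'hg recover' command relies
    # on; recover() takes the store lock itself):
    #   if repo.recover():
    #       ...  # an interrupted transaction was rolled back
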
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

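    # Calling-convention sketch (derived from the code above and from
    # _rollback() below): rollback() returns 1 when no rollback
    # information exists, while _rollback() returns 0 on success and for
    # dry runs, e.g.
    #   if repo.rollback(dryrun=True) == 0:
    #       ...  # undo data exists; a real rollback would proceed
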
    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = any(p not in self.changelog.nodemap for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

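    # Hypothetical extension sketch: since _buildcacheupdater() lives on
    # the repository precisely so extensions can augment it, an extension
    # could wrap it with the stock extensions.wrapfunction() helper:
    #   from mercurial import extensions, localrepo
    #   def wrapped(orig, repo, newtransaction):
    #       inner = orig(repo, newtransaction)
    #       def updater(tr):
    #           inner(tr)
    #           # warm extension-specific caches here
    #       return updater
    #   def uisetup(ui):
    #       extensions.wrapfunction(
    #           localrepo.localrepository, '_buildcacheupdater', wrapped
    #       )
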
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

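    # Usage sketch for a full warming pass (the caller shown here is an
    # assumption, e.g. a debug command; locking is needed because cache
    # writes touch the store):
    #   with repo.wlock(), repo.lock():
    #       repo.updatecaches(full=True)
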
    def invalidatecaches(self):

        if r'_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__[r'_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, r'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), r'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback()

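    # Minimal sketch of the contract documented above: the callback runs
    # when the outermost lock is released, or immediately if nothing is
    # locked:
    #   def notify():
    #       repo.ui.status(b'all locks released\n')
    #   repo._afterlock(notify)
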
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l

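    # Ordering sketch for the dead-lock note in both docstrings above:
    # always acquire 'wlock' before 'lock', and take both before opening
    # a transaction:
    #   with repo.wlock(), repo.lock(), repo.transaction(b'my-change'):
    #       ...  # mutate working-directory and store state here
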
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(
        self,
        fctx,
        manifest1,
        manifest2,
        linkrev,
        tr,
        changelist,
        includecopymeta,
    ):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug(b'reusing %s filelog entry\n' % fname)
                if (
                    fparent1 != nullid
                    and manifest1.flags(fname) != fctx.flags()
                ) or (
                    fparent2 != nullid
                    and manifest2.flags(fname) != fctx.flags()
                ):
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2:  # branch merge
                if fparent2 == nullid or cnode is None:  # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if cnode:
                self.ui.debug(
                    b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
                )
                if includecopymeta:
                    meta[b"copy"] = cfname
                    meta[b"copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(
                    _(
                        b"warning: can't find ancestor for '%s' "
                        b"copied from '%s'!\n"
                    )
                    % (fname, cfname)
                )

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

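    # Sketch of the copy metadata produced by _filecommit() when
    # includecopymeta is set (the values here are hypothetical):
    #   meta = {
    #       b'copy': b'foo',         # path the file was copied from
    #       b'copyrev': b'a9b3...',  # hex filelog node of the copy source
    #   }
    # with fparent1 set to nullid so readers know to consult the copy
    # data instead of a missing first parent.
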
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                if f in vdirs:  # visited directory
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=False,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            # internal config: ui.allowemptycommit
            allowemptycommit = (
                wctx.branch() != wctx.p1().branch()
                or extra.get(b'close')
                or merge
                or cctx.files()
                or self.ui.configbool(b'ui', b'allowemptycommit')
            )
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                raise

            def commithook():
                # hack for commands that use a temporary commit (eg: histedit)
                # temporary commit got stripped before hook release
                if self.changelog.hasnode(ret):
                    self.hook(
                        b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                    )

            self._afterlock(commithook)
            return ret

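    # Minimal usage sketch for commit(); the 'hg commit' command goes
    # through richer front-ends, so treat this as the raw API (message
    # and user below are made up):
    #   node = repo.commit(
    #       text=b'fix frobnication',
    #       user=b'Jane Doe <jane@example.com>',
    #   )
    #   # None is returned when an empty commit is not allowed
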
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().

        origctx is for convert to work around the problem that bug
        fixes to the files list in changesets change hashes. For
        convert to be the identity, it can pass an origctx and this
        function will use the same files list when it makes sense to
        do so.
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
        writefilecopymeta = writecopiesto != b'changeset-only'
        writechangesetcopy = writecopiesto in (
            b'changeset-only',
            b'compatibility',
        )
        p1copies, p2copies = None, None
        if writechangesetcopy:
            p1copies = ctx.p1copies()
            p2copies = ctx.p2copies()
        filesadded, filesremoved = None, None
        with self.lock(), self.transaction(b"commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug(b'reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
                if writechangesetcopy:
                    filesadded = ctx.filesadded()
                    filesremoved = ctx.filesremoved()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_(b"committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + b"\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(
                                fctx,
                                m1,
                                m2,
                                linkrev,
                                trp,
                                changed,
                                writefilecopymeta,
                            )
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(
                            _(b"trouble committing %s!\n") % uipathfn(f)
                        )
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(
                                _(b"trouble committing %s!\n") % uipathfn(f)
                            )
                        raise

                # update manifest
                removed = [f for f in removed if f in m1 or f in m2]
                drop = sorted([f for f in removed if f in m])
                for f in drop:
                    del m[f]
                if p2.rev() != nullrev:

                    @util.cachefunc
                    def mas():
                        p1n = p1.node()
                        p2n = p2.node()
                        cahs = self.changelog.commonancestorsheads(p1n, p2n)
                        if not cahs:
                            cahs = [nullrev]
                        return [self[r].manifest() for r in cahs]

                    def deletionfromparent(f):
                        # When a file is removed relative to p1 in a merge, this
                        # function determines whether the absence is due to a
                        # deletion from a parent, or whether the merge commit
                        # itself deletes the file. We decide this by doing a
                        # simplified three way merge of the manifest entry for
                        # the file. There are two ways we decide the merge
                        # itself didn't delete a file:
                        # - neither parent (nor the merge) contain the file
                        # - exactly one parent contains the file, and that
                        #   parent has the same filelog entry as the merge
                        #   ancestor (or all of them if there are two). In other
                        #   words, that parent left the file unchanged while the
                        #   other one deleted it.
                        # One way to think about this is that deleting a file is
                        # similar to emptying it, so the list of changed files
                        # should be similar either way. The computation
                        # described above is not done directly in _filecommit
                        # when creating the list of changed files, however
                        # it does something very similar by comparing filelog
                        # nodes.
                        if f in m1:
                            return f not in m2 and all(
                                f in ma and ma.find(f) == m1.find(f)
                                for ma in mas()
                            )
                        elif f in m2:
                            return all(
                                f in ma and ma.find(f) == m2.find(f)
                                for ma in mas()
                            )
                        else:
                            return True

                    removed = [f for f in removed if not deletionfromparent(f)]

                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug(
                        b'not reusing manifest (no file change in '
                        b'changelog, but manifest differs)\n'
                    )
                if files or md:
                    self.ui.note(_(b"committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(
                        trp,
                        linkrev,
                        p1.manifestnode(),
                        p2.manifestnode(),
                        added,
                        drop,
                        match=self.narrowmatch(),
                    )

                    if writechangesetcopy:
                        filesadded = [
                            f for f in changed if not (f in m1 or f in m2)
                        ]
                        filesremoved = removed
                else:
                    self.ui.debug(
                        b'reusing manifest from p1 (listed files '
                        b'actually unchanged)\n'
                    )
                    mn = p1.manifestnode()
            else:
                self.ui.debug(b'reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            if writecopiesto == b'changeset-only':
                # If writing only to changeset extras, use None to indicate that
                # no entry should be written. If writing to both, write an empty
                # entry to prevent the reader from falling back to reading
                # filelogs.
                p1copies = p1copies or None
                p2copies = p2copies or None
                filesadded = filesadded or None
                filesremoved = filesremoved or None

            if origctx and origctx.manifestnode() == mn:
                files = origctx.files()

            # update changelog
            self.ui.note(_(b"committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(
                mn,
                files,
                ctx.description(),
                trp,
                p1.node(),
                p2.node(),
                user,
                ctx.date(),
                ctx.extra().copy(),
                p1copies,
                p2copies,
                filesadded,
                filesremoved,
            )
            xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
            self.hook(
                b'pretxncommit',
                throw=True,
                node=hex(n),
                parent1=xp1,
                parent2=xp2,
            )
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
3164 # be compliant anyway
3160 #
3165 #
3161 # if minimal phase was 0 we don't need to retract anything
3166 # if minimal phase was 0 we don't need to retract anything
3162 phases.registernew(self, tr, targetphase, [n])
3167 phases.registernew(self, tr, targetphase, [n])
3163 return n
3168 return n
3164
3169
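    # A note on the None convention above (added commentary, not part of the
    # original source): when copies are written to the changeset only, empty
    # copy and file lists are collapsed to None so that no entry is written
    # at all; when writing to both changesets and filelogs, empty entries
    # are kept so readers do not fall back to filelog-based copy data.
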
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if b'_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

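    # Illustrative sketch (added commentary, not part of the original source;
    # 'fixup' is a hypothetical callback name): an extension re-registers its
    # callback on every status run, since the list is cleared after each run.
    #
    #   def fixup(wctx, status):
    #       # consult wctx.repo().dirstate here, never a cached copy
    #       pass
    #
    #   repo.addpostdsstatus(fixup)
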
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

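    # Illustrative example (added commentary, not part of the original
    # source):
    #
    #   repo.branchheads(b'default')
    #
    # returns the changelog nodes of the open heads of the default branch,
    # newest first; passing closed=True includes closed heads as well.
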
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

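    # A note on the loop above (added commentary, not part of the original
    # source): for each (top, bottom) pair the returned list samples
    # first-parent ancestors of top at exponentially growing distances
    # 1, 2, 4, 8, ... stopping at bottom, which lets the legacy wire-protocol
    # discovery narrow down a common ancestor with only logarithmically many
    # nodes per round trip.
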
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called with a pushop
        (carrying repo, remote, and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook():
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

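    # Illustrative example (added commentary, not part of the original
    # source; the node value is hypothetical): the b'bookmarks' namespace
    # moves a bookmark from an old node to a new one, with b'' meaning
    # "create it".
    #
    #   ok = repo.pushkey(b'bookmarks', b'feature', b'', newnodehex)
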
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

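    # For illustration (added commentary, not part of the original source):
    # omitted optional arguments render as the literal bytes b'None', e.g.
    #
    #   repo.debugwireargs(b'un', b'deux') == b'un deux None None None'
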
    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


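# Illustrative usage (added commentary, not part of the original source):
# transaction code passes the returned closure as a post-transaction
# callback so journal files are renamed to their undo counterparts.
#
#   onclose = aftertrans([(repo.svfs, b'journal', b'undo')])
#   onclose()  # renames journal -> undo, tolerating a missing journal

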
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


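# For example (added commentary, not part of the original source):
#
#   undoname(b'.hg/store/journal') == b'.hg/store/undo'
#   undoname(b'.hg/store/journal.bookmarks') == b'.hg/store/undo.bookmarks'

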
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(b'relshared')
        else:
            requirements.add(b'shared')

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengine = ui.config(b'format', b'revlog-compression')
    if compengine not in util.compengines:
        raise error.Abort(
            _(
                b'compression engine %s defined by '
                b'format.revlog-compression not available'
            )
            % compengine,
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    elif compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-side-data
    if ui.configbool(b'format', b'use-side-data'):
        requirements.add(SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(SIDEDATA_REQUIREMENT)
        requirements.add(COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(b'treemanifest')

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(b'internal-phase')

    if createopts.get(b'narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    return requirements


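# Worked example (added commentary, not part of the original source): with
# the two experimental options from this change enabled,
#
#   [format]
#   use-side-data = yes
#   exp-use-copies-side-data-changeset = yes
#
# the returned set contains both SIDEDATA_REQUIREMENT and
# COPIESSDC_REQUIREMENT alongside the usual b'revlogv1', b'store', etc.;
# scmutil.writerequires() later persists them into .hg/requires, locking
# out clients that do not understand sidedata-stored copies.

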
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


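# For illustration (added commentary, not part of the original source;
# b'bogus' is a made-up key): unknown options are passed through and later
# trigger the abort in createrepository().
#
#   filterknowncreateopts(ui, {b'backend': b'revlogv1', b'bogus': True})
#   # -> {b'bogus': True}

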
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


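# Minimal usage sketch (added commentary, not part of the original source;
# the path is hypothetical):
#
#   createrepository(ui, b'/tmp/newrepo', createopts={b'lfs': True})
#
# creates /tmp/newrepo/.hg with a requires file that includes b'lfs', after
# which instance() can open the repository normally.

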
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # makes all attribute lookups result in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
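

# For illustration (added commentary, not part of the original source):
# after poisoning, every attribute access except close() raises.
#
#   poisonrepository(repo)
#   repo.close()       # still allowed, a no-op
#   repo.changelog     # raises error.ProgrammingError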