changeset:   45300:d36283e2
branch:      default
user:        marmoute
summary:     nodemap: move the mode option to storage.revlog.nodemap.mode...
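The change below renames the persistent-nodemap mode knob from the `experimental` section to the `storage` section: `experimental.exp-persistent-nodemap.mode` becomes `storage.revlog.nodemap.mode`, keeping the b'compat' default and the experimental flag. As a minimal sketch of what the rename means for consumers (not part of this commit; it assumes a Mercurial repository in the current working directory), the option would now be read through the standard ui API like so:

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'.')  # assumes '.' is a Mercurial repository

    # After this change the mode lives in the 'storage' section; the
    # registered default below makes an unset option read as b'compat'.
    mode = repo.ui.config(b'storage', b'revlog.nodemap.mode')
    print(mode)  # b'compat' unless overridden in an hgrc [storage] section

Equivalently, users would now set the option in an hgrc under a `[storage]` section (`revlog.nodemap.mode = compat`) instead of the old `[experimental]` key.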
diff --git a/mercurial/configitems.py b/mercurial/configitems.py
--- a/mercurial/configitems.py
+++ b/mercurial/configitems.py
@@ -1,1574 +1,1575 @@
 # configitems.py - centralized declaration of configuration option
 #
 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import functools
 import re
 
 from . import (
     encoding,
     error,
 )
 
 
 def loadconfigtable(ui, extname, configtable):
     """update config item known to the ui with the extension ones"""
     for section, items in sorted(configtable.items()):
         knownitems = ui._knownconfig.setdefault(section, itemregister())
         knownkeys = set(knownitems)
         newkeys = set(items)
         for key in sorted(knownkeys & newkeys):
             msg = b"extension '%s' overwrite config item '%s.%s'"
             msg %= (extname, section, key)
             ui.develwarn(msg, config=b'warn-config')
 
         knownitems.update(items)
 
 
 class configitem(object):
     """represent a known config item
 
     :section: the official config section where to find this item,
     :name: the official name within the section,
     :default: default value for this item,
     :alias: optional list of tuples as alternatives,
     :generic: this is a generic definition, match name using regular expression.
     """
 
     def __init__(
         self,
         section,
         name,
         default=None,
         alias=(),
         generic=False,
         priority=0,
         experimental=False,
     ):
         self.section = section
         self.name = name
         self.default = default
         self.alias = list(alias)
         self.generic = generic
         self.priority = priority
         self.experimental = experimental
         self._re = None
         if generic:
             self._re = re.compile(self.name)
 
 
 class itemregister(dict):
     """A specialized dictionary that can handle wild-card selection"""
 
     def __init__(self):
         super(itemregister, self).__init__()
         self._generics = set()
 
     def update(self, other):
         super(itemregister, self).update(other)
         self._generics.update(other._generics)
 
     def __setitem__(self, key, item):
         super(itemregister, self).__setitem__(key, item)
         if item.generic:
             self._generics.add(item)
 
     def get(self, key):
         baseitem = super(itemregister, self).get(key)
         if baseitem is not None and not baseitem.generic:
             return baseitem
 
         # search for a matching generic item
         generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
         for item in generics:
             # we use 'match' instead of 'search' to make the matching simpler
             # for people unfamiliar with regular expression. Having the match
             # rooted to the start of the string will produce less surprising
             # result for user writing simple regex for sub-attribute.
             #
             # For example using "color\..*" match produces an unsurprising
             # result, while using search could suddenly match apparently
             # unrelated configuration that happens to contains "color."
             # anywhere. This is a tradeoff where we favor requiring ".*" on
             # some match to avoid the need to prefix most pattern with "^".
             # The "^" seems more error prone.
             if item._re.match(key):
                 return item
 
         return None
 
 
 coreitems = {}
 
 
 def _register(configtable, *args, **kwargs):
     item = configitem(*args, **kwargs)
     section = configtable.setdefault(item.section, itemregister())
     if item.name in section:
         msg = b"duplicated config item registration for '%s.%s'"
         raise error.ProgrammingError(msg % (item.section, item.name))
     section[item.name] = item
 
 
 # special value for case where the default is derived from other values
 dynamicdefault = object()
 
 # Registering actual config items
 
 
 def getitemregister(configtable):
     f = functools.partial(_register, configtable)
     # export pseudo enum as configitem.*
     f.dynamicdefault = dynamicdefault
     return f
 
 
 coreconfigitem = getitemregister(coreitems)
 
 
 def _registerdiffopts(section, configprefix=b''):
     coreconfigitem(
         section, configprefix + b'nodates', default=False,
     )
     coreconfigitem(
         section, configprefix + b'showfunc', default=False,
     )
     coreconfigitem(
         section, configprefix + b'unified', default=None,
     )
     coreconfigitem(
         section, configprefix + b'git', default=False,
     )
     coreconfigitem(
         section, configprefix + b'ignorews', default=False,
     )
     coreconfigitem(
         section, configprefix + b'ignorewsamount', default=False,
     )
     coreconfigitem(
         section, configprefix + b'ignoreblanklines', default=False,
     )
     coreconfigitem(
         section, configprefix + b'ignorewseol', default=False,
     )
     coreconfigitem(
         section, configprefix + b'nobinary', default=False,
     )
     coreconfigitem(
         section, configprefix + b'noprefix', default=False,
     )
     coreconfigitem(
         section, configprefix + b'word-diff', default=False,
     )
 
 
 coreconfigitem(
     b'alias', b'.*', default=dynamicdefault, generic=True,
 )
 coreconfigitem(
     b'auth', b'cookiefile', default=None,
 )
 _registerdiffopts(section=b'annotate')
 # bookmarks.pushing: internal hack for discovery
 coreconfigitem(
     b'bookmarks', b'pushing', default=list,
 )
 # bundle.mainreporoot: internal hack for bundlerepo
 coreconfigitem(
     b'bundle', b'mainreporoot', default=b'',
 )
 coreconfigitem(
     b'censor', b'policy', default=b'abort', experimental=True,
 )
 coreconfigitem(
     b'chgserver', b'idletimeout', default=3600,
 )
 coreconfigitem(
     b'chgserver', b'skiphash', default=False,
 )
 coreconfigitem(
     b'cmdserver', b'log', default=None,
 )
 coreconfigitem(
     b'cmdserver', b'max-log-files', default=7,
 )
 coreconfigitem(
     b'cmdserver', b'max-log-size', default=b'1 MB',
 )
 coreconfigitem(
     b'cmdserver', b'max-repo-cache', default=0, experimental=True,
 )
 coreconfigitem(
     b'cmdserver', b'message-encodings', default=list, experimental=True,
 )
 coreconfigitem(
     b'cmdserver',
     b'track-log',
     default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
 )
 coreconfigitem(
     b'color', b'.*', default=None, generic=True,
 )
 coreconfigitem(
     b'color', b'mode', default=b'auto',
 )
 coreconfigitem(
     b'color', b'pagermode', default=dynamicdefault,
 )
 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
 coreconfigitem(
     b'commands', b'commit.post-status', default=False,
 )
 coreconfigitem(
     b'commands', b'grep.all-files', default=False, experimental=True,
 )
 coreconfigitem(
     b'commands', b'merge.require-rev', default=False,
 )
 coreconfigitem(
     b'commands', b'push.require-revs', default=False,
 )
 coreconfigitem(
     b'commands', b'resolve.confirm', default=False,
 )
 coreconfigitem(
     b'commands', b'resolve.explicit-re-merge', default=False,
 )
 coreconfigitem(
     b'commands', b'resolve.mark-check', default=b'none',
 )
 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
 coreconfigitem(
     b'commands', b'show.aliasprefix', default=list,
 )
 coreconfigitem(
     b'commands', b'status.relative', default=False,
 )
 coreconfigitem(
     b'commands', b'status.skipstates', default=[], experimental=True,
 )
 coreconfigitem(
     b'commands', b'status.terse', default=b'',
 )
 coreconfigitem(
     b'commands', b'status.verbose', default=False,
 )
 coreconfigitem(
     b'commands', b'update.check', default=None,
 )
 coreconfigitem(
     b'commands', b'update.requiredest', default=False,
 )
 coreconfigitem(
     b'committemplate', b'.*', default=None, generic=True,
 )
 coreconfigitem(
     b'convert', b'bzr.saverev', default=True,
 )
 coreconfigitem(
     b'convert', b'cvsps.cache', default=True,
 )
 coreconfigitem(
     b'convert', b'cvsps.fuzz', default=60,
 )
 coreconfigitem(
     b'convert', b'cvsps.logencoding', default=None,
 )
 coreconfigitem(
     b'convert', b'cvsps.mergefrom', default=None,
 )
 coreconfigitem(
     b'convert', b'cvsps.mergeto', default=None,
 )
 coreconfigitem(
     b'convert', b'git.committeractions', default=lambda: [b'messagedifferent'],
 )
 coreconfigitem(
     b'convert', b'git.extrakeys', default=list,
 )
 coreconfigitem(
     b'convert', b'git.findcopiesharder', default=False,
 )
 coreconfigitem(
     b'convert', b'git.remoteprefix', default=b'remote',
 )
 coreconfigitem(
     b'convert', b'git.renamelimit', default=400,
 )
 coreconfigitem(
     b'convert', b'git.saverev', default=True,
 )
 coreconfigitem(
     b'convert', b'git.similarity', default=50,
 )
 coreconfigitem(
     b'convert', b'git.skipsubmodules', default=False,
 )
 coreconfigitem(
     b'convert', b'hg.clonebranches', default=False,
 )
 coreconfigitem(
     b'convert', b'hg.ignoreerrors', default=False,
 )
 coreconfigitem(
     b'convert', b'hg.preserve-hash', default=False,
 )
 coreconfigitem(
     b'convert', b'hg.revs', default=None,
 )
 coreconfigitem(
     b'convert', b'hg.saverev', default=False,
 )
 coreconfigitem(
     b'convert', b'hg.sourcename', default=None,
 )
 coreconfigitem(
     b'convert', b'hg.startrev', default=None,
 )
 coreconfigitem(
     b'convert', b'hg.tagsbranch', default=b'default',
 )
 coreconfigitem(
     b'convert', b'hg.usebranchnames', default=True,
 )
 coreconfigitem(
     b'convert', b'ignoreancestorcheck', default=False, experimental=True,
 )
 coreconfigitem(
     b'convert', b'localtimezone', default=False,
 )
 coreconfigitem(
     b'convert', b'p4.encoding', default=dynamicdefault,
 )
 coreconfigitem(
     b'convert', b'p4.startrev', default=0,
 )
 coreconfigitem(
     b'convert', b'skiptags', default=False,
 )
 coreconfigitem(
     b'convert', b'svn.debugsvnlog', default=True,
 )
 coreconfigitem(
     b'convert', b'svn.trunk', default=None,
 )
 coreconfigitem(
     b'convert', b'svn.tags', default=None,
 )
 coreconfigitem(
     b'convert', b'svn.branches', default=None,
 )
 coreconfigitem(
     b'convert', b'svn.startrev', default=0,
 )
 coreconfigitem(
     b'debug', b'dirstate.delaywrite', default=0,
 )
 coreconfigitem(
     b'defaults', b'.*', default=None, generic=True,
 )
 coreconfigitem(
     b'devel', b'all-warnings', default=False,
 )
 coreconfigitem(
     b'devel', b'bundle2.debug', default=False,
 )
 coreconfigitem(
     b'devel', b'bundle.delta', default=b'',
 )
 coreconfigitem(
     b'devel', b'cache-vfs', default=None,
 )
 coreconfigitem(
     b'devel', b'check-locks', default=False,
 )
 coreconfigitem(
     b'devel', b'check-relroot', default=False,
 )
 coreconfigitem(
     b'devel', b'default-date', default=None,
 )
 coreconfigitem(
     b'devel', b'deprec-warn', default=False,
 )
 coreconfigitem(
     b'devel', b'disableloaddefaultcerts', default=False,
 )
 coreconfigitem(
     b'devel', b'warn-empty-changegroup', default=False,
 )
 coreconfigitem(
     b'devel', b'legacy.exchange', default=list,
 )
 coreconfigitem(
     b'devel', b'persistent-nodemap', default=False,
 )
 coreconfigitem(
     b'devel', b'servercafile', default=b'',
 )
 coreconfigitem(
     b'devel', b'serverexactprotocol', default=b'',
 )
 coreconfigitem(
     b'devel', b'serverrequirecert', default=False,
 )
 coreconfigitem(
     b'devel', b'strip-obsmarkers', default=True,
 )
 coreconfigitem(
     b'devel', b'warn-config', default=None,
 )
 coreconfigitem(
     b'devel', b'warn-config-default', default=None,
 )
 coreconfigitem(
     b'devel', b'user.obsmarker', default=None,
 )
 coreconfigitem(
     b'devel', b'warn-config-unknown', default=None,
 )
 coreconfigitem(
     b'devel', b'debug.copies', default=False,
 )
 coreconfigitem(
     b'devel', b'debug.extensions', default=False,
 )
 coreconfigitem(
     b'devel', b'debug.repo-filters', default=False,
 )
 coreconfigitem(
     b'devel', b'debug.peer-request', default=False,
 )
 coreconfigitem(
     b'devel', b'discovery.randomize', default=True,
 )
 _registerdiffopts(section=b'diff')
 coreconfigitem(
     b'email', b'bcc', default=None,
 )
 coreconfigitem(
     b'email', b'cc', default=None,
 )
 coreconfigitem(
     b'email', b'charsets', default=list,
 )
 coreconfigitem(
     b'email', b'from', default=None,
 )
 coreconfigitem(
     b'email', b'method', default=b'smtp',
 )
 coreconfigitem(
     b'email', b'reply-to', default=None,
 )
 coreconfigitem(
     b'email', b'to', default=None,
 )
 coreconfigitem(
     b'experimental', b'archivemetatemplate', default=dynamicdefault,
 )
 coreconfigitem(
     b'experimental', b'auto-publish', default=b'publish',
 )
 coreconfigitem(
     b'experimental', b'bundle-phases', default=False,
 )
 coreconfigitem(
     b'experimental', b'bundle2-advertise', default=True,
 )
 coreconfigitem(
     b'experimental', b'bundle2-output-capture', default=False,
 )
 coreconfigitem(
     b'experimental', b'bundle2.pushback', default=False,
 )
 coreconfigitem(
     b'experimental', b'bundle2lazylocking', default=False,
 )
 coreconfigitem(
     b'experimental', b'bundlecomplevel', default=None,
 )
 coreconfigitem(
     b'experimental', b'bundlecomplevel.bzip2', default=None,
 )
 coreconfigitem(
     b'experimental', b'bundlecomplevel.gzip', default=None,
 )
 coreconfigitem(
     b'experimental', b'bundlecomplevel.none', default=None,
 )
 coreconfigitem(
     b'experimental', b'bundlecomplevel.zstd', default=None,
 )
 coreconfigitem(
     b'experimental', b'changegroup3', default=False,
 )
 coreconfigitem(
     b'experimental', b'cleanup-as-archived', default=False,
 )
 coreconfigitem(
     b'experimental', b'clientcompressionengines', default=list,
 )
 coreconfigitem(
     b'experimental', b'copytrace', default=b'on',
 )
 coreconfigitem(
     b'experimental', b'copytrace.movecandidateslimit', default=100,
 )
 coreconfigitem(
     b'experimental', b'copytrace.sourcecommitlimit', default=100,
 )
 coreconfigitem(
     b'experimental', b'copies.read-from', default=b"filelog-only",
 )
 coreconfigitem(
     b'experimental', b'copies.write-to', default=b'filelog-only',
 )
 coreconfigitem(
     b'experimental', b'crecordtest', default=None,
 )
 coreconfigitem(
     b'experimental', b'directaccess', default=False,
 )
 coreconfigitem(
     b'experimental', b'directaccess.revnums', default=False,
 )
 coreconfigitem(
     b'experimental', b'editortmpinhg', default=False,
 )
 coreconfigitem(
     b'experimental', b'evolution', default=list,
 )
 coreconfigitem(
     b'experimental',
     b'evolution.allowdivergence',
     default=False,
     alias=[(b'experimental', b'allowdivergence')],
 )
 coreconfigitem(
     b'experimental', b'evolution.allowunstable', default=None,
 )
 coreconfigitem(
     b'experimental', b'evolution.createmarkers', default=None,
 )
 coreconfigitem(
     b'experimental',
     b'evolution.effect-flags',
     default=True,
     alias=[(b'experimental', b'effect-flags')],
 )
 coreconfigitem(
     b'experimental', b'evolution.exchange', default=None,
 )
 coreconfigitem(
     b'experimental', b'evolution.bundle-obsmarker', default=False,
 )
 coreconfigitem(
     b'experimental', b'log.topo', default=False,
 )
 coreconfigitem(
     b'experimental', b'evolution.report-instabilities', default=True,
 )
 coreconfigitem(
     b'experimental', b'evolution.track-operation', default=True,
 )
 # repo-level config to exclude a revset visibility
 #
 # The target use case is to use `share` to expose different subset of the same
 # repository, especially server side. See also `server.view`.
 coreconfigitem(
     b'experimental', b'extra-filter-revs', default=None,
 )
 coreconfigitem(
     b'experimental', b'maxdeltachainspan', default=-1,
 )
 coreconfigitem(
     b'experimental', b'mergetempdirprefix', default=None,
 )
 coreconfigitem(
     b'experimental', b'mmapindexthreshold', default=None,
 )
 coreconfigitem(
     b'experimental', b'narrow', default=False,
 )
 coreconfigitem(
     b'experimental', b'nonnormalparanoidcheck', default=False,
 )
 coreconfigitem(
     b'experimental', b'exportableenviron', default=list,
 )
 coreconfigitem(
     b'experimental', b'extendedheader.index', default=None,
 )
 coreconfigitem(
     b'experimental', b'extendedheader.similarity', default=False,
 )
 coreconfigitem(
     b'experimental', b'graphshorten', default=False,
 )
 coreconfigitem(
     b'experimental', b'graphstyle.parent', default=dynamicdefault,
 )
 coreconfigitem(
     b'experimental', b'graphstyle.missing', default=dynamicdefault,
 )
 coreconfigitem(
     b'experimental', b'graphstyle.grandparent', default=dynamicdefault,
 )
 coreconfigitem(
     b'experimental', b'hook-track-tags', default=False,
 )
 coreconfigitem(
     b'experimental', b'httppeer.advertise-v2', default=False,
 )
 coreconfigitem(
     b'experimental', b'httppeer.v2-encoder-order', default=None,
 )
 coreconfigitem(
     b'experimental', b'httppostargs', default=False,
 )
 coreconfigitem(
     b'experimental', b'mergedriver', default=None,
 )
 coreconfigitem(b'experimental', b'nointerrupt', default=False)
 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
 
 coreconfigitem(
     b'experimental', b'obsmarkers-exchange-debug', default=False,
 )
 coreconfigitem(
     b'experimental', b'remotenames', default=False,
 )
 coreconfigitem(
     b'experimental', b'removeemptydirs', default=True,
 )
 coreconfigitem(
     b'experimental', b'revert.interactive.select-to-keep', default=False,
 )
 coreconfigitem(
     b'experimental', b'revisions.prefixhexnode', default=False,
 )
 coreconfigitem(
     b'experimental', b'revlogv2', default=None,
 )
 coreconfigitem(
     b'experimental', b'revisions.disambiguatewithin', default=None,
 )
 coreconfigitem(
     b'experimental', b'rust.index', default=False,
 )
 coreconfigitem(
-    b'experimental', b'exp-persistent-nodemap.mode', default=b'compat',
-)
-coreconfigitem(
     b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
 )
 coreconfigitem(
     b'experimental',
     b'server.manifestdata.recommended-batch-size',
     default=100000,
 )
 coreconfigitem(
     b'experimental', b'server.stream-narrow-clones', default=False,
 )
 coreconfigitem(
     b'experimental', b'single-head-per-branch', default=False,
 )
 coreconfigitem(
     b'experimental',
     b'single-head-per-branch:account-closed-heads',
     default=False,
 )
 coreconfigitem(
     b'experimental', b'sshserver.support-v2', default=False,
 )
 coreconfigitem(
     b'experimental', b'sparse-read', default=False,
 )
 coreconfigitem(
     b'experimental', b'sparse-read.density-threshold', default=0.50,
 )
 coreconfigitem(
     b'experimental', b'sparse-read.min-gap-size', default=b'65K',
 )
 coreconfigitem(
     b'experimental', b'treemanifest', default=False,
 )
 coreconfigitem(
     b'experimental', b'update.atomic-file', default=False,
 )
 coreconfigitem(
     b'experimental', b'sshpeer.advertise-v2', default=False,
 )
 coreconfigitem(
     b'experimental', b'web.apiserver', default=False,
 )
 coreconfigitem(
     b'experimental', b'web.api.http-v2', default=False,
 )
 coreconfigitem(
     b'experimental', b'web.api.debugreflect', default=False,
 )
 coreconfigitem(
     b'experimental', b'worker.wdir-get-thread-safe', default=False,
 )
 coreconfigitem(
     b'experimental', b'worker.repository-upgrade', default=False,
 )
 coreconfigitem(
     b'experimental', b'xdiff', default=False,
 )
 coreconfigitem(
     b'extensions', b'.*', default=None, generic=True,
 )
 coreconfigitem(
     b'extdata', b'.*', default=None, generic=True,
 )
 coreconfigitem(
     b'format', b'bookmarks-in-store', default=False,
 )
 coreconfigitem(
     b'format', b'chunkcachesize', default=None, experimental=True,
 )
 coreconfigitem(
     b'format', b'dotencode', default=True,
 )
 coreconfigitem(
     b'format', b'generaldelta', default=False, experimental=True,
 )
 coreconfigitem(
     b'format', b'manifestcachesize', default=None, experimental=True,
 )
 coreconfigitem(
     b'format', b'maxchainlen', default=dynamicdefault, experimental=True,
 )
 coreconfigitem(
     b'format', b'obsstore-version', default=None,
 )
 coreconfigitem(
     b'format', b'sparse-revlog', default=True,
 )
 coreconfigitem(
     b'format',
     b'revlog-compression',
     default=lambda: [b'zlib'],
     alias=[(b'experimental', b'format.compression')],
 )
 coreconfigitem(
     b'format', b'usefncache', default=True,
 )
 coreconfigitem(
     b'format', b'usegeneraldelta', default=True,
 )
 coreconfigitem(
     b'format', b'usestore', default=True,
 )
 # Right now, the only efficient implement of the nodemap logic is in Rust, so
 # the persistent nodemap feature needs to stay experimental as long as the Rust
 # extensions are an experimental feature.
 coreconfigitem(
     b'format', b'use-persistent-nodemap', default=False, experimental=True
 )
 coreconfigitem(
     b'format',
     b'exp-use-copies-side-data-changeset',
     default=False,
     experimental=True,
 )
 coreconfigitem(
     b'format', b'exp-use-side-data', default=False, experimental=True,
 )
 coreconfigitem(
     b'format', b'internal-phase', default=False, experimental=True,
 )
 coreconfigitem(
     b'fsmonitor', b'warn_when_unused', default=True,
 )
 coreconfigitem(
     b'fsmonitor', b'warn_update_file_count', default=50000,
 )
 coreconfigitem(
     b'help', br'hidden-command\..*', default=False, generic=True,
 )
 coreconfigitem(
     b'help', br'hidden-topic\..*', default=False, generic=True,
 )
 coreconfigitem(
     b'hooks', b'.*', default=dynamicdefault, generic=True,
 )
 coreconfigitem(
     b'hgweb-paths', b'.*', default=list, generic=True,
 )
 coreconfigitem(
     b'hostfingerprints', b'.*', default=list, generic=True,
 )
 coreconfigitem(
     b'hostsecurity', b'ciphers', default=None,
 )
 coreconfigitem(
     b'hostsecurity', b'disabletls10warning', default=False,
 )
 coreconfigitem(
     b'hostsecurity', b'minimumprotocol', default=dynamicdefault,
 )
 coreconfigitem(
     b'hostsecurity',
     b'.*:minimumprotocol$',
     default=dynamicdefault,
     generic=True,
 )
 coreconfigitem(
     b'hostsecurity', b'.*:ciphers$', default=dynamicdefault, generic=True,
 )
 coreconfigitem(
     b'hostsecurity', b'.*:fingerprints$', default=list, generic=True,
 )
 coreconfigitem(
     b'hostsecurity', b'.*:verifycertsfile$', default=None, generic=True,
 )
 
 coreconfigitem(
     b'http_proxy', b'always', default=False,
 )
 coreconfigitem(
     b'http_proxy', b'host', default=None,
 )
 coreconfigitem(
     b'http_proxy', b'no', default=list,
 )
 coreconfigitem(
     b'http_proxy', b'passwd', default=None,
 )
 coreconfigitem(
     b'http_proxy', b'user', default=None,
 )
 
 coreconfigitem(
     b'http', b'timeout', default=None,
 )
 
 coreconfigitem(
     b'logtoprocess', b'commandexception', default=None,
 )
 coreconfigitem(
     b'logtoprocess', b'commandfinish', default=None,
 )
 coreconfigitem(
     b'logtoprocess', b'command', default=None,
 )
 coreconfigitem(
     b'logtoprocess', b'develwarn', default=None,
 )
 coreconfigitem(
     b'logtoprocess', b'uiblocked', default=None,
 )
 coreconfigitem(
     b'merge', b'checkunknown', default=b'abort',
 )
 coreconfigitem(
     b'merge', b'checkignored', default=b'abort',
 )
 coreconfigitem(
     b'experimental', b'merge.checkpathconflicts', default=False,
 )
 coreconfigitem(
     b'merge', b'followcopies', default=True,
 )
 coreconfigitem(
     b'merge', b'on-failure', default=b'continue',
 )
 coreconfigitem(
     b'merge', b'preferancestor', default=lambda: [b'*'], experimental=True,
 )
 coreconfigitem(
     b'merge', b'strict-capability-check', default=False,
 )
 coreconfigitem(
     b'merge-tools', b'.*', default=None, generic=True,
 )
 coreconfigitem(
     b'merge-tools',
     br'.*\.args$',
     default=b"$local $base $other",
     generic=True,
     priority=-1,
 )
 coreconfigitem(
     b'merge-tools', br'.*\.binary$', default=False, generic=True, priority=-1,
 )
 coreconfigitem(
     b'merge-tools', br'.*\.check$', default=list, generic=True, priority=-1,
 )
 coreconfigitem(
     b'merge-tools',
     br'.*\.checkchanged$',
     default=False,
     generic=True,
     priority=-1,
 )
 coreconfigitem(
     b'merge-tools',
     br'.*\.executable$',
     default=dynamicdefault,
     generic=True,
     priority=-1,
 )
 coreconfigitem(
     b'merge-tools', br'.*\.fixeol$', default=False, generic=True, priority=-1,
 )
 coreconfigitem(
     b'merge-tools', br'.*\.gui$', default=False, generic=True, priority=-1,
 )
 coreconfigitem(
     b'merge-tools',
     br'.*\.mergemarkers$',
     default=b'basic',
     generic=True,
     priority=-1,
 )
 coreconfigitem(
     b'merge-tools',
     br'.*\.mergemarkertemplate$',
     default=dynamicdefault,  # take from ui.mergemarkertemplate
     generic=True,
     priority=-1,
 )
 coreconfigitem(
     b'merge-tools', br'.*\.priority$', default=0, generic=True, priority=-1,
 )
 coreconfigitem(
     b'merge-tools',
     br'.*\.premerge$',
     default=dynamicdefault,
     generic=True,
     priority=-1,
 )
 coreconfigitem(
     b'merge-tools', br'.*\.symlink$', default=False, generic=True, priority=-1,
 )
 coreconfigitem(
     b'pager', b'attend-.*', default=dynamicdefault, generic=True,
 )
 coreconfigitem(
     b'pager', b'ignore', default=list,
 )
 coreconfigitem(
     b'pager', b'pager', default=dynamicdefault,
 )
 coreconfigitem(
     b'patch', b'eol', default=b'strict',
 )
 coreconfigitem(
     b'patch', b'fuzz', default=2,
 )
 coreconfigitem(
     b'paths', b'default', default=None,
 )
 coreconfigitem(
     b'paths', b'default-push', default=None,
 )
 coreconfigitem(
     b'paths', b'.*', default=None, generic=True,
 )
 coreconfigitem(
     b'phases', b'checksubrepos', default=b'follow',
 )
 coreconfigitem(
     b'phases', b'new-commit', default=b'draft',
 )
 coreconfigitem(
     b'phases', b'publish', default=True,
 )
 coreconfigitem(
     b'profiling', b'enabled', default=False,
 )
 coreconfigitem(
     b'profiling', b'format', default=b'text',
 )
 coreconfigitem(
     b'profiling', b'freq', default=1000,
 )
 coreconfigitem(
     b'profiling', b'limit', default=30,
 )
 coreconfigitem(
     b'profiling', b'nested', default=0,
 )
 coreconfigitem(
     b'profiling', b'output', default=None,
 )
 coreconfigitem(
     b'profiling', b'showmax', default=0.999,
 )
 coreconfigitem(
     b'profiling', b'showmin', default=dynamicdefault,
 )
 coreconfigitem(
     b'profiling', b'showtime', default=True,
 )
 coreconfigitem(
     b'profiling', b'sort', default=b'inlinetime',
 )
 coreconfigitem(
     b'profiling', b'statformat', default=b'hotpath',
 )
 coreconfigitem(
     b'profiling', b'time-track', default=dynamicdefault,
 )
 coreconfigitem(
     b'profiling', b'type', default=b'stat',
 )
 coreconfigitem(
     b'progress', b'assume-tty', default=False,
 )
 coreconfigitem(
     b'progress', b'changedelay', default=1,
 )
 coreconfigitem(
     b'progress', b'clear-complete', default=True,
 )
 coreconfigitem(
     b'progress', b'debug', default=False,
 )
 coreconfigitem(
     b'progress', b'delay', default=3,
 )
 coreconfigitem(
     b'progress', b'disable', default=False,
 )
 coreconfigitem(
     b'progress', b'estimateinterval', default=60.0,
 )
 coreconfigitem(
     b'progress',
     b'format',
     default=lambda: [b'topic', b'bar', b'number', b'estimate'],
 )
 coreconfigitem(
     b'progress', b'refresh', default=0.1,
 )
 coreconfigitem(
     b'progress', b'width', default=dynamicdefault,
 )
 coreconfigitem(
     b'pull', b'confirm', default=False,
 )
 coreconfigitem(
     b'push', b'pushvars.server', default=False,
 )
 coreconfigitem(
     b'rewrite',
     b'backup-bundle',
     default=True,
     alias=[(b'ui', b'history-editing-backup')],
 )
 coreconfigitem(
     b'rewrite', b'update-timestamp', default=False,
 )
 coreconfigitem(
     b'storage', b'new-repo-backend', default=b'revlogv1', experimental=True,
 )
 coreconfigitem(
     b'storage',
     b'revlog.optimize-delta-parent-choice',
     default=True,
     alias=[(b'format', b'aggressivemergedeltas')],
 )
 # experimental as long as rust is experimental (or a C version is implemented)
 coreconfigitem(
     b'storage', b'revlog.nodemap.mmap', default=True, experimental=True
 )
# experimental as long as format.use-persistent-nodemap is.
coreconfigitem(
    b'storage', b'revlog.nodemap.mode', default=b'compat', experimental=True
)
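# Editorial note: the four lines above are the change introduced by this
# changeset -- the nodemap mode option now lives under
# storage.revlog.nodemap.mode. A minimal sketch of reading such an option at
# run time (illustrative only, not part of the original file):
#
#     mode = repo.ui.config(b'storage', b'revlog.nodemap.mode')
#     if mode == b'compat':
#         pass  # b'compat' is the declared default, per the item above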
coreconfigitem(
    b'storage', b'revlog.reuse-external-delta', default=True,
)
coreconfigitem(
    b'storage', b'revlog.reuse-external-delta-parent', default=None,
)
coreconfigitem(
    b'storage', b'revlog.zlib.level', default=None,
)
coreconfigitem(
    b'storage', b'revlog.zstd.level', default=None,
)
coreconfigitem(
    b'server', b'bookmarks-pushkey-compat', default=True,
)
coreconfigitem(
    b'server', b'bundle1', default=True,
)
coreconfigitem(
    b'server', b'bundle1gd', default=None,
)
coreconfigitem(
    b'server', b'bundle1.pull', default=None,
)
coreconfigitem(
    b'server', b'bundle1gd.pull', default=None,
)
coreconfigitem(
    b'server', b'bundle1.push', default=None,
)
coreconfigitem(
    b'server', b'bundle1gd.push', default=None,
)
coreconfigitem(
    b'server',
    b'bundle2.stream',
    default=True,
    alias=[(b'experimental', b'bundle2.stream')],
)
coreconfigitem(
    b'server', b'compressionengines', default=list,
)
coreconfigitem(
    b'server', b'concurrent-push-mode', default=b'check-related',
)
coreconfigitem(
    b'server', b'disablefullbundle', default=False,
)
coreconfigitem(
    b'server', b'maxhttpheaderlen', default=1024,
)
coreconfigitem(
    b'server', b'pullbundle', default=False,
)
coreconfigitem(
    b'server', b'preferuncompressed', default=False,
)
coreconfigitem(
    b'server', b'streamunbundle', default=False,
)
coreconfigitem(
    b'server', b'uncompressed', default=True,
)
coreconfigitem(
    b'server', b'uncompressedallowsecret', default=False,
)
coreconfigitem(
    b'server', b'view', default=b'served',
)
coreconfigitem(
    b'server', b'validate', default=False,
)
coreconfigitem(
    b'server', b'zliblevel', default=-1,
)
coreconfigitem(
    b'server', b'zstdlevel', default=3,
)
coreconfigitem(
    b'share', b'pool', default=None,
)
coreconfigitem(
    b'share', b'poolnaming', default=b'identity',
)
coreconfigitem(
    b'shelve', b'maxbackups', default=10,
)
coreconfigitem(
    b'smtp', b'host', default=None,
)
coreconfigitem(
    b'smtp', b'local_hostname', default=None,
)
coreconfigitem(
    b'smtp', b'password', default=None,
)
coreconfigitem(
    b'smtp', b'port', default=dynamicdefault,
)
coreconfigitem(
    b'smtp', b'tls', default=b'none',
)
coreconfigitem(
    b'smtp', b'username', default=None,
)
coreconfigitem(
    b'sparse', b'missingwarning', default=True, experimental=True,
)
coreconfigitem(
    b'subrepos',
    b'allowed',
    default=dynamicdefault,  # to make backporting simpler
)
coreconfigitem(
    b'subrepos', b'hg:allowed', default=dynamicdefault,
)
coreconfigitem(
    b'subrepos', b'git:allowed', default=dynamicdefault,
)
coreconfigitem(
    b'subrepos', b'svn:allowed', default=dynamicdefault,
)
coreconfigitem(
    b'templates', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'templateconfig', b'.*', default=dynamicdefault, generic=True,
)
coreconfigitem(
    b'trusted', b'groups', default=list,
)
coreconfigitem(
    b'trusted', b'users', default=list,
)
coreconfigitem(
    b'ui', b'_usedassubrepo', default=False,
)
coreconfigitem(
    b'ui', b'allowemptycommit', default=False,
)
coreconfigitem(
    b'ui', b'archivemeta', default=True,
)
coreconfigitem(
    b'ui', b'askusername', default=False,
)
coreconfigitem(
    b'ui', b'clonebundlefallback', default=False,
)
coreconfigitem(
    b'ui', b'clonebundleprefers', default=list,
)
coreconfigitem(
    b'ui', b'clonebundles', default=True,
)
coreconfigitem(
    b'ui', b'color', default=b'auto',
)
coreconfigitem(
    b'ui', b'commitsubrepos', default=False,
)
coreconfigitem(
    b'ui', b'debug', default=False,
)
coreconfigitem(
    b'ui', b'debugger', default=None,
)
coreconfigitem(
    b'ui', b'editor', default=dynamicdefault,
)
coreconfigitem(
    b'ui', b'fallbackencoding', default=None,
)
coreconfigitem(
    b'ui', b'forcecwd', default=None,
)
coreconfigitem(
    b'ui', b'forcemerge', default=None,
)
coreconfigitem(
    b'ui', b'formatdebug', default=False,
)
coreconfigitem(
    b'ui', b'formatjson', default=False,
)
coreconfigitem(
    b'ui', b'formatted', default=None,
)
coreconfigitem(
    b'ui', b'graphnodetemplate', default=None,
)
coreconfigitem(
    b'ui', b'interactive', default=None,
)
coreconfigitem(
    b'ui', b'interface', default=None,
)
coreconfigitem(
    b'ui', b'interface.chunkselector', default=None,
)
coreconfigitem(
    b'ui', b'large-file-limit', default=10000000,
)
coreconfigitem(
    b'ui', b'logblockedtimes', default=False,
)
coreconfigitem(
    b'ui', b'logtemplate', default=None,
)
coreconfigitem(
    b'ui', b'merge', default=None,
)
coreconfigitem(
    b'ui', b'mergemarkers', default=b'basic',
)
coreconfigitem(
    b'ui',
    b'mergemarkertemplate',
    default=(
        b'{node|short} '
        b'{ifeq(tags, "tip", "", '
        b'ifeq(tags, "", "", "{tags} "))}'
        b'{if(bookmarks, "{bookmarks} ")}'
        b'{ifeq(branch, "default", "", "{branch} ")}'
        b'- {author|user}: {desc|firstline}'
    ),
)
coreconfigitem(
    b'ui', b'message-output', default=b'stdio',
)
coreconfigitem(
    b'ui', b'nontty', default=False,
)
coreconfigitem(
    b'ui', b'origbackuppath', default=None,
)
coreconfigitem(
    b'ui', b'paginate', default=True,
)
coreconfigitem(
    b'ui', b'patch', default=None,
)
coreconfigitem(
    b'ui', b'pre-merge-tool-output-template', default=None,
)
coreconfigitem(
    b'ui', b'portablefilenames', default=b'warn',
)
coreconfigitem(
    b'ui', b'promptecho', default=False,
)
coreconfigitem(
    b'ui', b'quiet', default=False,
)
coreconfigitem(
    b'ui', b'quietbookmarkmove', default=False,
)
coreconfigitem(
    b'ui', b'relative-paths', default=b'legacy',
)
coreconfigitem(
    b'ui', b'remotecmd', default=b'hg',
)
coreconfigitem(
    b'ui', b'report_untrusted', default=True,
)
coreconfigitem(
    b'ui', b'rollback', default=True,
)
coreconfigitem(
    b'ui', b'signal-safe-lock', default=True,
)
coreconfigitem(
    b'ui', b'slash', default=False,
)
coreconfigitem(
    b'ui', b'ssh', default=b'ssh',
)
coreconfigitem(
    b'ui', b'ssherrorhint', default=None,
)
coreconfigitem(
    b'ui', b'statuscopies', default=False,
)
coreconfigitem(
    b'ui', b'strict', default=False,
)
coreconfigitem(
    b'ui', b'style', default=b'',
)
coreconfigitem(
    b'ui', b'supportcontact', default=None,
)
coreconfigitem(
    b'ui', b'textwidth', default=78,
)
coreconfigitem(
    b'ui', b'timeout', default=b'600',
)
coreconfigitem(
    b'ui', b'timeout.warn', default=0,
)
coreconfigitem(
    b'ui', b'traceback', default=False,
)
coreconfigitem(
    b'ui', b'tweakdefaults', default=False,
)
coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
coreconfigitem(
    b'ui', b'verbose', default=False,
)
coreconfigitem(
    b'verify', b'skipflags', default=None,
)
coreconfigitem(
    b'web', b'allowbz2', default=False,
)
coreconfigitem(
    b'web', b'allowgz', default=False,
)
coreconfigitem(
    b'web', b'allow-pull', alias=[(b'web', b'allowpull')], default=True,
)
coreconfigitem(
    b'web', b'allow-push', alias=[(b'web', b'allow_push')], default=list,
)
coreconfigitem(
    b'web', b'allowzip', default=False,
)
coreconfigitem(
    b'web', b'archivesubrepos', default=False,
)
coreconfigitem(
    b'web', b'cache', default=True,
)
coreconfigitem(
    b'web', b'comparisoncontext', default=5,
)
coreconfigitem(
    b'web', b'contact', default=None,
)
coreconfigitem(
    b'web', b'deny_push', default=list,
)
coreconfigitem(
    b'web', b'guessmime', default=False,
)
coreconfigitem(
    b'web', b'hidden', default=False,
)
coreconfigitem(
    b'web', b'labels', default=list,
)
coreconfigitem(
    b'web', b'logoimg', default=b'hglogo.png',
)
coreconfigitem(
    b'web', b'logourl', default=b'https://mercurial-scm.org/',
)
coreconfigitem(
    b'web', b'accesslog', default=b'-',
)
coreconfigitem(
    b'web', b'address', default=b'',
)
coreconfigitem(
    b'web', b'allow-archive', alias=[(b'web', b'allow_archive')], default=list,
)
coreconfigitem(
    b'web', b'allow_read', default=list,
)
coreconfigitem(
    b'web', b'baseurl', default=None,
)
coreconfigitem(
    b'web', b'cacerts', default=None,
)
coreconfigitem(
    b'web', b'certificate', default=None,
)
coreconfigitem(
    b'web', b'collapse', default=False,
)
coreconfigitem(
    b'web', b'csp', default=None,
)
coreconfigitem(
    b'web', b'deny_read', default=list,
)
coreconfigitem(
    b'web', b'descend', default=True,
)
coreconfigitem(
    b'web', b'description', default=b"",
)
coreconfigitem(
    b'web', b'encoding', default=lambda: encoding.encoding,
)
coreconfigitem(
    b'web', b'errorlog', default=b'-',
)
coreconfigitem(
    b'web', b'ipv6', default=False,
)
coreconfigitem(
    b'web', b'maxchanges', default=10,
)
coreconfigitem(
    b'web', b'maxfiles', default=10,
)
coreconfigitem(
    b'web', b'maxshortchanges', default=60,
)
coreconfigitem(
    b'web', b'motd', default=b'',
)
coreconfigitem(
    b'web', b'name', default=dynamicdefault,
)
coreconfigitem(
    b'web', b'port', default=8000,
)
coreconfigitem(
    b'web', b'prefix', default=b'',
)
coreconfigitem(
    b'web', b'push_ssl', default=True,
)
coreconfigitem(
    b'web', b'refreshinterval', default=20,
)
coreconfigitem(
    b'web', b'server-header', default=None,
)
coreconfigitem(
    b'web', b'static', default=None,
)
coreconfigitem(
    b'web', b'staticurl', default=None,
)
coreconfigitem(
    b'web', b'stripes', default=1,
)
coreconfigitem(
    b'web', b'style', default=b'paper',
)
coreconfigitem(
    b'web', b'templates', default=None,
)
coreconfigitem(
    b'web', b'view', default=b'served', experimental=True,
)
coreconfigitem(
    b'worker', b'backgroundclose', default=dynamicdefault,
)
# Windows defaults to a limit of 512 open files. A buffer of 128
# should give us enough headway.
coreconfigitem(
    b'worker', b'backgroundclosemaxqueue', default=384,
)
coreconfigitem(
    b'worker', b'backgroundcloseminfilecount', default=2048,
)
coreconfigitem(
    b'worker', b'backgroundclosethreadcount', default=4,
)
coreconfigitem(
    b'worker', b'enabled', default=True,
)
coreconfigitem(
    b'worker', b'numcpus', default=None,
)

# Rebase related configuration moved to core because other extensions are
# doing strange things. For example, shelve imports the extension to reuse
# some bits without formally loading it.
coreconfigitem(
    b'commands', b'rebase.requiredest', default=False,
)
coreconfigitem(
    b'experimental', b'rebaseskipobsolete', default=True,
)
coreconfigitem(
    b'rebase', b'singletransaction', default=False,
)
coreconfigitem(
    b'rebase', b'experimental.inmemory', default=False,
)
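# Editorial note: each coreconfigitem() above declares a recognized option
# and its default; users override them in an hgrc file. An illustrative
# override for the rebase items just declared (values are made up):
#
#     [rebase]
#     singletransaction = True
#     experimental.inmemory = True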
@@ -1,3828 +1,3828 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

from .revlogutils import constants as revlogconst

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be
    unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True
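# Usage sketch (editorial, illustrative): isfilecached() lets callers peek at
# a filecache-backed property without triggering its computation, e.g.:
#
#     obj, cached = isfilecached(repo, b'changelog')
#     if cached:
#         pass  # the changelog was already loaded on this repo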


class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repos only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to run on the unfiltered version"""

    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper
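# Usage sketch (editorial, illustrative): a method decorated this way always
# executes against the unfiltered repository, even when invoked on a
# filtered view:
#
#     @unfilteredmethod
#     def destroyed(self):
#         ...  # here `self` is repo.unfiltered()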


moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True
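# Usage sketch (editorial, illustrative): callers normally obtain the
# executor from a peer and use it as a context manager; for local peers,
# callcommand() returns an already-resolved future:
#
#     with peer.commandexecutor() as e:
#         node = e.callcommand(b'lookup', {b'key': b'tip'}).result()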


@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(b'clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, missingheads=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, missingheads=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = b'sparserevlog'

# A repository with the sidedataflag requirement will allow storing extra
# information for revisions without altering their original hashes.
SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'

# A repository with the copies-sidedata-changeset requirement will store
# copies-related information in the changeset's sidedata.
COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'

# The repository uses the persistent nodemap for the changelog and the
# manifest.
NODEMAP_REQUIREMENT = b'persistent-nodemap'
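# Editorial note: this requirement appears to be the on-disk counterpart of
# the nodemap options declared in configitems.py above (including the new
# storage.revlog.nodemap.mode). A sketch of the usual membership test against
# a repository's requirements set (illustrative only):
#
#     if NODEMAP_REQUIREMENT in requirements:
#         pass  # persistent nodemap files are expected alongside the revlogs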

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
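# Registration sketch (editorial, illustrative): an extension that adds its
# own requirement would typically register a callback like this (the feature
# name below is made up):
#
#     def featuresetup(ui, features):
#         features.add(b'exp-my-extension-feature')
#
#     def extsetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)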
459
459
460
460
461 def makelocalrepository(baseui, path, intents=None):
461 def makelocalrepository(baseui, path, intents=None):
462 """Create a local repository object.
462 """Create a local repository object.
463
463
464 Given arguments needed to construct a local repository, this function
464 Given arguments needed to construct a local repository, this function
465 performs various early repository loading functionality (such as
465 performs various early repository loading functionality (such as
466 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
466 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
467 the repository can be opened, derives a type suitable for representing
467 the repository can be opened, derives a type suitable for representing
468 that repository, and returns an instance of it.
468 that repository, and returns an instance of it.
469
469
470 The returned object conforms to the ``repository.completelocalrepository``
470 The returned object conforms to the ``repository.completelocalrepository``
471 interface.
471 interface.
472
472
473 The repository type is derived by calling a series of factory functions
473 The repository type is derived by calling a series of factory functions
474 for each aspect/interface of the final repository. These are defined by
474 for each aspect/interface of the final repository. These are defined by
475 ``REPO_INTERFACES``.
475 ``REPO_INTERFACES``.
476
476
477 Each factory function is called to produce a type implementing a specific
477 Each factory function is called to produce a type implementing a specific
478 interface. The cumulative list of returned types will be combined into a
478 interface. The cumulative list of returned types will be combined into a
479 new type and that type will be instantiated to represent the local
479 new type and that type will be instantiated to represent the local
480 repository.
480 repository.
481
481
482 The factory functions each receive various state that may be consulted
482 The factory functions each receive various state that may be consulted
483 as part of deriving a type.
483 as part of deriving a type.
484
484
485 Extensions should wrap these factory functions to customize repository type
485 Extensions should wrap these factory functions to customize repository type
486 creation. Note that an extension's wrapped function may be called even if
486 creation. Note that an extension's wrapped function may be called even if
487 that extension is not loaded for the repo being constructed. Extensions
487 that extension is not loaded for the repo being constructed. Extensions
488 should check if their ``__name__`` appears in the
488 should check if their ``__name__`` appears in the
489 ``extensionmodulenames`` set passed to the factory function and no-op if
489 ``extensionmodulenames`` set passed to the factory function and no-op if
490 not.
490 not.
491 """
491 """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(
                _(b'.hg/sharedpath points to nonexistent directory %s')
                % sharedvfs.base
            )

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The wcache vfs is used to manage cache files related to the working copy.
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )

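# A hedged sketch of the wrapping described in the docstring above: an
# extension can customize the derived repository type by wrapping one of the
# factory functions named in ``REPO_INTERFACES``. The extension name
# ``myext`` is hypothetical; the registration line would normally live in
# that extension's uisetup().
def _examplewrapfilestorage(orig, requirements, features, **kwargs):
    # Delegate to the wrapped factory for the default behavior.
    typ = orig(requirements, features, **kwargs)
    # Per the guidance above, no-op unless this extension is loaded for the
    # repo being constructed.
    if 'myext' not in kwargs.get('extensionmodulenames', set()):
        return typ
    # A real extension could return a subclass here to augment file storage.
    return typ


# Registration (illustrative only):
#   extensions.wrapfunction(localrepo, 'makefilestorage', _examplewrapfilestorage)

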
def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    if not rcutil.use_repo_hgrc():
        return False
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

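# A minimal sketch of the monkeypatching invited by the docstring above. The
# extra file name ``hgrc-extra`` is hypothetical; ``orig`` is the wrapped
# loadhgrc, installed via extensions.wrapfunction(localrepo, 'loadhgrc', ...).
def _exampleloadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
    loaded = orig(ui, wdirvfs, hgvfs, requirements)
    try:
        # Pull settings from an additional, extension-defined file.
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        return True
    except IOError:
        return loaded

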
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')

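# In effect, a repository whose ``.hg/requires`` lists ``lfs`` is opened as
# if its hgrc contained the following (unless the extension was already
# configured elsewhere):
#
#   [extensions]
#   lfs =

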
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported

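# Sketch of how an extension can teach this function about a new requirement
# via ``featuresetupfuncs`` (the requirement name ``exp-myfeature`` is made
# up). The callable only runs when its defining extension is enabled for the
# ui instance in use, per the module check above.
def _examplefeaturesetup(ui, supported):
    supported.add(b'exp-myfeature')


# Registration, typically from the extension's uisetup():
#   localrepo.featuresetupfuncs.add(_examplefeaturesetup)

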
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )

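# For example, opening a repo whose ``.hg/requires`` names a requirement this
# Mercurial does not recognize aborts with (output shape illustrative):
#
#   abort: repository requires features unknown to this Mercurial: exp-myfeature
#   (see https://mercurial-scm.org/wiki/MissingRequirement for more information)

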
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )

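# Sketch of the extra checking an extension might add by wrapping this
# function (the requirement and config names are hypothetical):
def _examplecompatcheck(orig, ui, requirements):
    orig(ui, requirements)
    if b'exp-myfeature' in requirements and not ui.configbool(
        b'experimental', b'myfeature'
    ):
        raise error.RepoError(
            _(b'repository requires experimental.myfeature to be enabled')
        )

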
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(
                path, vfstype, b'dotencode' in requirements
            )

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

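# For instance, a modern repository whose requirements include ``store``,
# ``fncache`` and ``dotencode`` resolves to a ``fncachestore``, mirroring the
# call made in ``makelocalrepository()`` above:
#
#   store = makestore(
#       {b'store', b'fncache', b'dotencode'},
#       storebasepath,
#       lambda base: vfsmod.vfs(base, cacheaudited=True),
#   )

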
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options

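# As a concrete example of the copies logic above: without the sidedata
# requirement, setting ``experimental.copies.write-to`` to ``changeset-only``
# or ``compatibility`` yields ``options[b'copies-storage'] == b'extra'``,
# i.e. copy metadata goes through the changeset ``extra`` mechanism.

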
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if NODEMAP_REQUIREMENT in requirements:
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
        epnm = ui.config(b'storage', b'revlog.nodemap.mode')
        options[b'persistent-nodemap.mode'] = epnm
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options

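# With the nodemap requirement present, the block above is driven by an hgrc
# such as the following (the mode value shown is illustrative):
#
#   [storage]
#   revlog.nodemap.mmap = yes
#   revlog.nodemap.mode = compat

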
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]

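# Because ``REPO_INTERFACES`` stores ``lambda: makemain`` rather than
# ``makemain`` itself, the module-level name is re-resolved at repository
# open time, so a wrapper installed later is honored. A hedged sketch:
def _examplewrapmakemain(orig, **kwargs):
    # ``orig`` is the original makemain; a real extension could return a
    # subclass of the type it produces.
    return orig(**kwargs)


# Installed from an extension (illustrative only):
#   extensions.wrapfunction(localrepo, 'makemain', _examplewrapmakemain)

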
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        b'treemanifest',
        COPIESSDC_REQUIREMENT,
        REVLOGV2_REQUIREMENT,
        SIDEDATA_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        b'shared',
        b'relshared',
        b'dotencode',
        b'exp-sparse',
        b'internal-phase',
    }

    # List of prefixes for files which can be written without 'wlock'.
    # Extensions should extend this set when needed.
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

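    # Extending the set from an extension is a one-liner in, e.g.,
    # reposetup() (the prefix below is hypothetical):
    #
    #   repo._wlockfreeprefix.add(b'myext-state')
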
    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator).
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # Holds sets of revisions to be filtered. Should be cleared when
        # something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            if path.startswith(b'journal.') or path.startswith(b'undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

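    # The ward turns unguarded writes into developer warnings. For instance,
    # writing ``.hg/bookmarks`` without holding the wlock would surface as
    # something like (output shape illustrative):
    #
    #   devel-warn: write with no wlock: "bookmarks"
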
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example, calling `repo.filtered("served")` will return a repoview
        using the "served" view, regardless of the initial view used by
        `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

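    # Usage sketch (illustrative): because filtering always restarts from the
    # unfiltered repository, these two expressions yield equivalent views
    # rather than stacking filters:
    #
    #   repo.filtered(b'served')
    #   repo.filtered(b'visible').filtered(b'served')
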
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light": the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

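    # Extension sketch (illustrative, not part of this module): an extension
    # can use the _makedirstate() extension point by swapping the repo class
    # in its reposetup(), e.g.:
    #
    #   def reposetup(ui, repo):
    #       class wrappedrepo(repo.__class__):
    #           def _makedirstate(self):
    #               ds = super(wrappedrepo, self)._makedirstate()
    #               # ... wrap or decorate ``ds`` here ...
    #               return ds
    #       repo.__class__ = wrappedrepo
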
    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

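    # Composition sketch (illustrative): with a narrowspec including only
    # b'src/', narrowmatch() alone matches paths under src/, while
    # narrowmatch(m, includeexact=True) also admits any path listed exactly
    # in m.files(), i.e. intersect(m, union(narrow, exact(m.files()))).
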
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = {}
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        mapping = self._quick_access_changeid_null
        if self.filtername in repoview.filter_has_wc:
            mapping = mapping.copy()
            mapping.update(self._quick_access_changeid_wc)
        return mapping

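    # Lookup sketch (illustrative): the mapping lets common spellings of a
    # changeset resolve without touching the changelog, e.g.
    #
    #   repo._quick_access_changeid.get(b'null')  # -> (nullrev, nullid)
    #   repo._quick_access_changeid.get(b'.')     # -> (p1 rev, p1 node)
    #
    # where the b'.' entry only exists for filters listed in
    # repoview.filter_has_wc.
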
    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

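    # Access sketch (illustrative): __getitem__ accepts several changeid
    # forms, e.g.
    #
    #   repo[None]    # workingctx for the working directory
    #   repo[0]       # changectx for revision 0
    #   repo[b'.']    # changectx for the first working copy parent
    #   repo[b'tip']  # changectx for the tip
    #   repo[0:2]     # list of changectx, skipping filtered revisions
    #
    # 20-byte binary nodes and 40-byte hex nodes are accepted as well.
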
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

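    # Usage sketch (illustrative): %-formatting escapes values safely, e.g.
    #
    #   repo.revs(b'heads(%d::)', rev)       # %d escapes an integer revision
    #   repo.revs(b'%ld and merge()', revs)  # %ld escapes a list of revisions
    #
    # Both return a smartset of integer revisions.
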
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

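    # Invocation sketch (illustrative): keyword arguments become the hook's
    # HG_* environment variables (for shell hooks) or keyword arguments (for
    # in-process hooks), e.g., as transaction() does below:
    #
    #   repo.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
    #
    # With throw=True a failing hook raises instead of merely reporting.
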
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

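    # Configuration sketch (illustrative): filter patterns come from hgrc
    # sections such as [encode] and [decode], e.g.
    #
    #   [encode]
    #   **.txt = tempfile: unix2dos -n INFILE OUTFILE
    #
    # which _loadfilter(b'encode') turns into a (matcher, fn, params) entry;
    # commands with no registered data filter fall back to procutil.filter.
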
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (e.g.: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction
        # touched tags (new or changed or deleted tags). In addition the
        # details of these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as
        # it might exist from a previous transaction even if no tags were
        # touched in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
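        # Example content sketch (hypothetical nodes) for
        # ``REPOROOT/.hg/changes/tags.changes``, recording one removed tag
        # and one moved tag:
        #
        #   -R 1234567890123456789012345678901234567890 release-1.0
        #   -M 1234567890123456789012345678901234567890 stable
        #   +M 0987654321098765432109876543210987654321 stable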
2119 tracktags = lambda x: None
2119 tracktags = lambda x: None
2120 # experimental config: experimental.hook-track-tags
2120 # experimental config: experimental.hook-track-tags
2121 shouldtracktags = self.ui.configbool(
2121 shouldtracktags = self.ui.configbool(
2122 b'experimental', b'hook-track-tags'
2122 b'experimental', b'hook-track-tags'
2123 )
2123 )
2124 if desc != b'strip' and shouldtracktags:
2124 if desc != b'strip' and shouldtracktags:
2125 oldheads = self.changelog.headrevs()
2125 oldheads = self.changelog.headrevs()
2126
2126
2127 def tracktags(tr2):
2127 def tracktags(tr2):
2128 repo = reporef()
2128 repo = reporef()
2129 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2129 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2130 newheads = repo.changelog.headrevs()
2130 newheads = repo.changelog.headrevs()
2131 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2131 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2132 # notes: we compare lists here.
2132 # notes: we compare lists here.
2133 # As we do it only once buiding set would not be cheaper
2133 # As we do it only once buiding set would not be cheaper
2134 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2134 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2135 if changes:
2135 if changes:
2136 tr2.hookargs[b'tag_moved'] = b'1'
2136 tr2.hookargs[b'tag_moved'] = b'1'
2137 with repo.vfs(
2137 with repo.vfs(
2138 b'changes/tags.changes', b'w', atomictemp=True
2138 b'changes/tags.changes', b'w', atomictemp=True
2139 ) as changesfile:
2139 ) as changesfile:
2140 # note: we do not register the file to the transaction
2140 # note: we do not register the file to the transaction
2141 # because we needs it to still exist on the transaction
2141 # because we needs it to still exist on the transaction
2142 # is close (for txnclose hooks)
2142 # is close (for txnclose hooks)
2143 tagsmod.writediff(changesfile, changes)
2143 tagsmod.writediff(changesfile, changes)
2144
2144
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

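        # Illustrative sketch (not part of the original file): since the
        # pretxnclose hook above is invoked with throw=True, an in-process
        # hook can veto the whole transaction by returning a truthy value.
        # The hook name and module below are hypothetical:
        #
        #   # hgrc:  [hooks]  pretxnclose.audit = python:myhooks.audit
        #   def audit(ui, repo, hooktype, txnname=None, **kwargs):
        #       ui.debug(b'closing transaction: %s\n' % txnname)
        #       return False  # a truthy return would abort the transaction
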
        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
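        # Illustrative sketch (not part of the original file): post-close
        # callbacks appear to run in sorted category-name order, which is
        # why the leading "-" above sorts before the summary callbacks:
        #
        #   >>> sorted([b'00-txnreport', b'-warm-cache'])
        #   [b'-warm-cache', b'00-txnreport']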

        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

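    # Illustrative sketch (assumption, not part of the original file): the
    # module-level undoname() helper used above maps each journal file to
    # its post-transaction undo twin, roughly:
    #
    #   def undoname(fn):
    #       base, name = os.path.split(fn)
    #       assert name.startswith(b'journal')
    #       return os.path.join(base, name.replace(b'journal', b'undo', 1))
    #
    # e.g. b'journal.dirstate' -> b'undo.dirstate'.
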
    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

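    # Illustrative note (not part of the original file): this method backs
    # the user-facing recovery command for an interrupted transaction:
    #
    #   $ hg recover
    #   rolling back interrupted transaction
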
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

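    # Illustrative sketch (not part of the original file): the weakref
    # above keeps the transaction from pinning the repository alive. A
    # weak reference dereferences to None once its target is collected:
    #
    #   import weakref
    #   class _Repo(object):  # hypothetical stand-in
    #       pass
    #   r = _Repo()
    #   ref = weakref.ref(r)
    #   assert ref() is r      # target alive, callback can use it
    #   del r
    #   assert ref() is None   # target collected; no cycle kept it alive
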
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

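    # Illustrative usage sketch (not part of the original file): commit()
    # below relies on this to defer its 'commit' hook until every lock is
    # released, so the hook observes a fully consistent repository:
    #
    #   def callback(success):
    #       ui.debug(b'all locks released\n')
    #   repo._afterlock(callback)  # runs immediately if no lock is held
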
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l

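    # Illustrative usage sketch (not part of the original file): callers
    # that need both locks must take wlock first, as commit() does below:
    #
    #   with repo.wlock(), repo.lock():
    #       ...  # safe to mutate both .hg and .hg/store here
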
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(
        self,
        fctx,
        manifest1,
        manifest2,
        linkrev,
        tr,
        changelist,
        includecopymeta,
    ):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug(b'reusing %s filelog entry\n' % fname)
                if (
                    fparent1 != nullid
                    and manifest1.flags(fname) != fctx.flags()
                ) or (
                    fparent2 != nullid
                    and manifest2.flags(fname) != fctx.flags()
                ):
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2:  # branch merge
                if fparent2 == nullid or cnode is None:  # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if cnode:
                self.ui.debug(
                    b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
                )
                if includecopymeta:
                    meta[b"copy"] = cfname
                    meta[b"copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(
                    _(
                        b"warning: can't find ancestor for '%s' "
                        b"copied from '%s'!\n"
                    )
                    % (fname, cfname)
                )

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid
            elif not fparentancestors:
                # TODO: this whole if-else might be simplified much more
                ms = mergemod.mergestate.read(self)
                if (
                    fname in ms
                    and ms[fname] == mergemod.MERGE_RECORD_MERGED_OTHER
                ):
                    fparent1, fparent2 = fparent2, nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

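    # Illustrative sketch (not part of the original file): when copy
    # metadata is written to the filelog above, the 'meta' dict stored with
    # the new file revision looks roughly like:
    #
    #   meta = {
    #       b'copy': b'foo',        # source path of the copy
    #       b'copyrev': b'ab12...', # hex filelog node of the source
    #   }
    #
    # with fparent1 forced to nullid so readers know to look up the copy.
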
    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            allowemptycommit = (
                wctx.branch() != wctx.p1().branch()
                or extra.get(b'close')
                or merge
                or cctx.files()
                or self.ui.configbool(b'ui', b'allowemptycommit')
            )
            if not allowemptycommit:
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

            def commithook(unused_success):
                # hack for commands that use a temporary commit (eg: histedit)
                # temporary commit got stripped before hook release
                if self.changelog.hasnode(ret):
                    self.hook(
                        b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                    )

            self._afterlock(commithook)
            return ret

3045 @unfilteredmethod
3045 @unfilteredmethod
3046 def commitctx(self, ctx, error=False, origctx=None):
3046 def commitctx(self, ctx, error=False, origctx=None):
3047 """Add a new revision to current repository.
3047 """Add a new revision to current repository.
3048 Revision information is passed via the context argument.
3048 Revision information is passed via the context argument.
3049
3049
3050 ctx.files() should list all files involved in this commit, i.e.
3050 ctx.files() should list all files involved in this commit, i.e.
3051 modified/added/removed files. On merge, it may be wider than the
3051 modified/added/removed files. On merge, it may be wider than the
3052 ctx.files() actually committed, since file nodes derived directly
3052 ctx.files() actually committed, since file nodes derived directly
3053 from p1 or p2 are excluded from the committed ctx.files().
3053 from p1 or p2 are excluded from the committed ctx.files().
3054
3054
3055 origctx is for convert, to work around the problem that bug
3055 origctx is for convert, to work around the problem that bug
3056 fixes to the files list in changesets change their hashes. For
3056 fixes to the files list in changesets change their hashes. For
3057 convert to be the identity, it can pass an origctx and this
3057 convert to be the identity, it can pass an origctx and this
3058 function will use the same files list when it makes sense to
3058 function will use the same files list when it makes sense to
3059 do so.
3059 do so.
3060 """
3060 """
3061
3061
3062 p1, p2 = ctx.p1(), ctx.p2()
3062 p1, p2 = ctx.p1(), ctx.p2()
3063 user = ctx.user()
3063 user = ctx.user()
3064
3064
3065 if self.filecopiesmode == b'changeset-sidedata':
3065 if self.filecopiesmode == b'changeset-sidedata':
3066 writechangesetcopy = True
3066 writechangesetcopy = True
3067 writefilecopymeta = True
3067 writefilecopymeta = True
3068 writecopiesto = None
3068 writecopiesto = None
3069 else:
3069 else:
3070 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
3070 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
3071 writefilecopymeta = writecopiesto != b'changeset-only'
3071 writefilecopymeta = writecopiesto != b'changeset-only'
3072 writechangesetcopy = writecopiesto in (
3072 writechangesetcopy = writecopiesto in (
3073 b'changeset-only',
3073 b'changeset-only',
3074 b'compatibility',
3074 b'compatibility',
3075 )
3075 )
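For reference, a hedged summary of how the three experimental.copies.write-to values map onto the two flags computed above; the filelog-only default is assumed from the usual config default, which this hunk does not show:

    # config value      -> (writechangesetcopy, writefilecopymeta)
    # b'filelog-only'   -> (False, True)   # assumed default
    # b'changeset-only' -> (True,  False)
    # b'compatibility'  -> (True,  True)   # write to both places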
3076 p1copies, p2copies = None, None
3076 p1copies, p2copies = None, None
3077 if writechangesetcopy:
3077 if writechangesetcopy:
3078 p1copies = ctx.p1copies()
3078 p1copies = ctx.p1copies()
3079 p2copies = ctx.p2copies()
3079 p2copies = ctx.p2copies()
3080 filesadded, filesremoved = None, None
3080 filesadded, filesremoved = None, None
3081 with self.lock(), self.transaction(b"commit") as tr:
3081 with self.lock(), self.transaction(b"commit") as tr:
3082 trp = weakref.proxy(tr)
3082 trp = weakref.proxy(tr)
3083
3083
3084 if ctx.manifestnode():
3084 if ctx.manifestnode():
3085 # reuse an existing manifest revision
3085 # reuse an existing manifest revision
3086 self.ui.debug(b'reusing known manifest\n')
3086 self.ui.debug(b'reusing known manifest\n')
3087 mn = ctx.manifestnode()
3087 mn = ctx.manifestnode()
3088 files = ctx.files()
3088 files = ctx.files()
3089 if writechangesetcopy:
3089 if writechangesetcopy:
3090 filesadded = ctx.filesadded()
3090 filesadded = ctx.filesadded()
3091 filesremoved = ctx.filesremoved()
3091 filesremoved = ctx.filesremoved()
3092 elif ctx.files():
3092 elif ctx.files():
3093 m1ctx = p1.manifestctx()
3093 m1ctx = p1.manifestctx()
3094 m2ctx = p2.manifestctx()
3094 m2ctx = p2.manifestctx()
3095 mctx = m1ctx.copy()
3095 mctx = m1ctx.copy()
3096
3096
3097 m = mctx.read()
3097 m = mctx.read()
3098 m1 = m1ctx.read()
3098 m1 = m1ctx.read()
3099 m2 = m2ctx.read()
3099 m2 = m2ctx.read()
3100
3100
3101 # check in files
3101 # check in files
3102 added = []
3102 added = []
3103 changed = []
3103 changed = []
3104 removed = list(ctx.removed())
3104 removed = list(ctx.removed())
3105 linkrev = len(self)
3105 linkrev = len(self)
3106 self.ui.note(_(b"committing files:\n"))
3106 self.ui.note(_(b"committing files:\n"))
3107 uipathfn = scmutil.getuipathfn(self)
3107 uipathfn = scmutil.getuipathfn(self)
3108 for f in sorted(ctx.modified() + ctx.added()):
3108 for f in sorted(ctx.modified() + ctx.added()):
3109 self.ui.note(uipathfn(f) + b"\n")
3109 self.ui.note(uipathfn(f) + b"\n")
3110 try:
3110 try:
3111 fctx = ctx[f]
3111 fctx = ctx[f]
3112 if fctx is None:
3112 if fctx is None:
3113 removed.append(f)
3113 removed.append(f)
3114 else:
3114 else:
3115 added.append(f)
3115 added.append(f)
3116 m[f] = self._filecommit(
3116 m[f] = self._filecommit(
3117 fctx,
3117 fctx,
3118 m1,
3118 m1,
3119 m2,
3119 m2,
3120 linkrev,
3120 linkrev,
3121 trp,
3121 trp,
3122 changed,
3122 changed,
3123 writefilecopymeta,
3123 writefilecopymeta,
3124 )
3124 )
3125 m.setflag(f, fctx.flags())
3125 m.setflag(f, fctx.flags())
3126 except OSError:
3126 except OSError:
3127 self.ui.warn(
3127 self.ui.warn(
3128 _(b"trouble committing %s!\n") % uipathfn(f)
3128 _(b"trouble committing %s!\n") % uipathfn(f)
3129 )
3129 )
3130 raise
3130 raise
3131 except IOError as inst:
3131 except IOError as inst:
3132 errcode = getattr(inst, 'errno', errno.ENOENT)
3132 errcode = getattr(inst, 'errno', errno.ENOENT)
3133 if error or errcode and errcode != errno.ENOENT:
3133 if error or errcode and errcode != errno.ENOENT:
3134 self.ui.warn(
3134 self.ui.warn(
3135 _(b"trouble committing %s!\n") % uipathfn(f)
3135 _(b"trouble committing %s!\n") % uipathfn(f)
3136 )
3136 )
3137 raise
3137 raise
3138
3138
3139 # update manifest
3139 # update manifest
3140 removed = [f for f in removed if f in m1 or f in m2]
3140 removed = [f for f in removed if f in m1 or f in m2]
3141 drop = sorted([f for f in removed if f in m])
3141 drop = sorted([f for f in removed if f in m])
3142 for f in drop:
3142 for f in drop:
3143 del m[f]
3143 del m[f]
3144 if p2.rev() != nullrev:
3144 if p2.rev() != nullrev:
3145
3145
3146 @util.cachefunc
3146 @util.cachefunc
3147 def mas():
3147 def mas():
3148 p1n = p1.node()
3148 p1n = p1.node()
3149 p2n = p2.node()
3149 p2n = p2.node()
3150 cahs = self.changelog.commonancestorsheads(p1n, p2n)
3150 cahs = self.changelog.commonancestorsheads(p1n, p2n)
3151 if not cahs:
3151 if not cahs:
3152 cahs = [nullrev]
3152 cahs = [nullrev]
3153 return [self[r].manifest() for r in cahs]
3153 return [self[r].manifest() for r in cahs]
3154
3154
3155 def deletionfromparent(f):
3155 def deletionfromparent(f):
3156 # When a file is removed relative to p1 in a merge, this
3156 # When a file is removed relative to p1 in a merge, this
3157 # function determines whether the absence is due to a
3157 # function determines whether the absence is due to a
3158 # deletion from a parent, or whether the merge commit
3158 # deletion from a parent, or whether the merge commit
3159 # itself deletes the file. We decide this by doing a
3159 # itself deletes the file. We decide this by doing a
3160 # simplified three way merge of the manifest entry for
3160 # simplified three way merge of the manifest entry for
3161 # the file. There are two ways we decide the merge
3161 # the file. There are two ways we decide the merge
3162 # itself didn't delete a file:
3162 # itself didn't delete a file:
3163 # - neither parent (nor the merge) contain the file
3163 # - neither parent (nor the merge) contain the file
3164 # - exactly one parent contains the file, and that
3164 # - exactly one parent contains the file, and that
3165 # parent has the same filelog entry as the merge
3165 # parent has the same filelog entry as the merge
3166 # ancestor (or all of them if there are two). In other
3166 # ancestor (or all of them if there are two). In other
3167 # words, that parent left the file unchanged while the
3167 # words, that parent left the file unchanged while the
3168 # other one deleted it.
3168 # other one deleted it.
3169 # One way to think about this is that deleting a file is
3169 # One way to think about this is that deleting a file is
3170 # similar to emptying it, so the list of changed files
3170 # similar to emptying it, so the list of changed files
3171 # should be similar either way. The computation
3171 # should be similar either way. The computation
3172 # described above is not done directly in _filecommit
3172 # described above is not done directly in _filecommit
3173 # when creating the list of changed files, however
3173 # when creating the list of changed files, however
3174 # it does something very similar by comparing filelog
3174 # it does something very similar by comparing filelog
3175 # nodes.
3175 # nodes.
3176 if f in m1:
3176 if f in m1:
3177 return f not in m2 and all(
3177 return f not in m2 and all(
3178 f in ma and ma.find(f) == m1.find(f)
3178 f in ma and ma.find(f) == m1.find(f)
3179 for ma in mas()
3179 for ma in mas()
3180 )
3180 )
3181 elif f in m2:
3181 elif f in m2:
3182 return all(
3182 return all(
3183 f in ma and ma.find(f) == m2.find(f)
3183 f in ma and ma.find(f) == m2.find(f)
3184 for ma in mas()
3184 for ma in mas()
3185 )
3185 )
3186 else:
3186 else:
3187 return True
3187 return True
3188
3188
3189 removed = [f for f in removed if not deletionfromparent(f)]
3189 removed = [f for f in removed if not deletionfromparent(f)]
3190
3190
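To make the deletion rule above concrete, here is a standalone sketch using plain dicts in place of manifest objects; "ancestors" plays the role of mas(), and every name is illustrative rather than Mercurial API:

    def deletion_from_parent(f, m1, m2, ancestors):
        # m1, m2 and each ancestor map filename -> filelog node
        if f in m1:
            # p2 dropped the file while p1 left it untouched relative
            # to every merge ancestor: the deletion came from p2, not
            # from the merge itself
            return f not in m2 and all(
                f in ma and ma[f] == m1[f] for ma in ancestors
            )
        elif f in m2:
            return all(f in ma and ma[f] == m2[f] for ma in ancestors)
        else:
            # neither parent has the file: nothing for the merge to delete
            return True

    # a merge keeping p1's unchanged copy of a file that p2 deleted:
    assert deletion_from_parent(b'a', {b'a': 1}, {}, [{b'a': 1}])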
3191 files = changed + removed
3191 files = changed + removed
3192 md = None
3192 md = None
3193 if not files:
3193 if not files:
3194 # if no "files" actually changed in terms of the changelog,
3194 # if no "files" actually changed in terms of the changelog,
3195 # try hard to detect an unmodified manifest entry so that the
3195 # try hard to detect an unmodified manifest entry so that the
3196 # exact same commit can be reproduced later on convert.
3196 # exact same commit can be reproduced later on convert.
3197 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3197 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3198 if not files and md:
3198 if not files and md:
3199 self.ui.debug(
3199 self.ui.debug(
3200 b'not reusing manifest (no file change in '
3200 b'not reusing manifest (no file change in '
3201 b'changelog, but manifest differs)\n'
3201 b'changelog, but manifest differs)\n'
3202 )
3202 )
3203 if files or md:
3203 if files or md:
3204 self.ui.note(_(b"committing manifest\n"))
3204 self.ui.note(_(b"committing manifest\n"))
3205 # we're using narrowmatch here since it's already applied at
3205 # we're using narrowmatch here since it's already applied at
3206 # other stages (such as dirstate.walk), so we're already
3206 # other stages (such as dirstate.walk), so we're already
3207 # ignoring things outside of narrowspec in most cases. The
3207 # ignoring things outside of narrowspec in most cases. The
3208 # one case where we might have files outside the narrowspec
3208 # one case where we might have files outside the narrowspec
3209 # at this point is merges, and we already error out in the
3209 # at this point is merges, and we already error out in the
3210 # case where the merge has files outside of the narrowspec,
3210 # case where the merge has files outside of the narrowspec,
3211 # so this is safe.
3211 # so this is safe.
3212 mn = mctx.write(
3212 mn = mctx.write(
3213 trp,
3213 trp,
3214 linkrev,
3214 linkrev,
3215 p1.manifestnode(),
3215 p1.manifestnode(),
3216 p2.manifestnode(),
3216 p2.manifestnode(),
3217 added,
3217 added,
3218 drop,
3218 drop,
3219 match=self.narrowmatch(),
3219 match=self.narrowmatch(),
3220 )
3220 )
3221
3221
3222 if writechangesetcopy:
3222 if writechangesetcopy:
3223 filesadded = [
3223 filesadded = [
3224 f for f in changed if not (f in m1 or f in m2)
3224 f for f in changed if not (f in m1 or f in m2)
3225 ]
3225 ]
3226 filesremoved = removed
3226 filesremoved = removed
3227 else:
3227 else:
3228 self.ui.debug(
3228 self.ui.debug(
3229 b'reusing manifest from p1 (listed files '
3229 b'reusing manifest from p1 (listed files '
3230 b'actually unchanged)\n'
3230 b'actually unchanged)\n'
3231 )
3231 )
3232 mn = p1.manifestnode()
3232 mn = p1.manifestnode()
3233 else:
3233 else:
3234 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3234 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3235 mn = p1.manifestnode()
3235 mn = p1.manifestnode()
3236 files = []
3236 files = []
3237
3237
3238 if writecopiesto == b'changeset-only':
3238 if writecopiesto == b'changeset-only':
3239 # If writing only to changeset extras, use None to indicate that
3239 # If writing only to changeset extras, use None to indicate that
3240 # no entry should be written. If writing to both, write an empty
3240 # no entry should be written. If writing to both, write an empty
3241 # entry to prevent the reader from falling back to reading
3241 # entry to prevent the reader from falling back to reading
3242 # filelogs.
3242 # filelogs.
3243 p1copies = p1copies or None
3243 p1copies = p1copies or None
3244 p2copies = p2copies or None
3244 p2copies = p2copies or None
3245 filesadded = filesadded or None
3245 filesadded = filesadded or None
3246 filesremoved = filesremoved or None
3246 filesremoved = filesremoved or None
3247
3247
3248 if origctx and origctx.manifestnode() == mn:
3248 if origctx and origctx.manifestnode() == mn:
3249 files = origctx.files()
3249 files = origctx.files()
3250
3250
3251 # update changelog
3251 # update changelog
3252 self.ui.note(_(b"committing changelog\n"))
3252 self.ui.note(_(b"committing changelog\n"))
3253 self.changelog.delayupdate(tr)
3253 self.changelog.delayupdate(tr)
3254 n = self.changelog.add(
3254 n = self.changelog.add(
3255 mn,
3255 mn,
3256 files,
3256 files,
3257 ctx.description(),
3257 ctx.description(),
3258 trp,
3258 trp,
3259 p1.node(),
3259 p1.node(),
3260 p2.node(),
3260 p2.node(),
3261 user,
3261 user,
3262 ctx.date(),
3262 ctx.date(),
3263 ctx.extra().copy(),
3263 ctx.extra().copy(),
3264 p1copies,
3264 p1copies,
3265 p2copies,
3265 p2copies,
3266 filesadded,
3266 filesadded,
3267 filesremoved,
3267 filesremoved,
3268 )
3268 )
3269 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3269 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3270 self.hook(
3270 self.hook(
3271 b'pretxncommit',
3271 b'pretxncommit',
3272 throw=True,
3272 throw=True,
3273 node=hex(n),
3273 node=hex(n),
3274 parent1=xp1,
3274 parent1=xp1,
3275 parent2=xp2,
3275 parent2=xp2,
3276 )
3276 )
3277 # set the new commit in its proper phase
3277 # set the new commit in its proper phase
3278 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3278 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3279 if targetphase:
3279 if targetphase:
3280 # retracting the boundary does not alter parent changesets.
3280 # retracting the boundary does not alter parent changesets.
3281 # if a parent has a higher phase, the resulting phase will
3281 # if a parent has a higher phase, the resulting phase will
3282 # be compliant anyway
3282 # be compliant anyway
3283 #
3283 #
3284 # if minimal phase was 0 we don't need to retract anything
3284 # if minimal phase was 0 we don't need to retract anything
3285 phases.registernew(self, tr, targetphase, [n])
3285 phases.registernew(self, tr, targetphase, [n])
3286 return n
3286 return n
3287
3287
3288 @unfilteredmethod
3288 @unfilteredmethod
3289 def destroying(self):
3289 def destroying(self):
3290 '''Inform the repository that nodes are about to be destroyed.
3290 '''Inform the repository that nodes are about to be destroyed.
3291 Intended for use by strip and rollback, so there's a common
3291 Intended for use by strip and rollback, so there's a common
3292 place for anything that has to be done before destroying history.
3292 place for anything that has to be done before destroying history.
3293
3293
3294 This is mostly useful for saving state that is in memory and waiting
3294 This is mostly useful for saving state that is in memory and waiting
3295 to be flushed when the current lock is released. Because a call to
3295 to be flushed when the current lock is released. Because a call to
3296 destroyed is imminent, the repo will be invalidated, causing those
3296 destroyed is imminent, the repo will be invalidated, causing those
3297 changes to either stay in memory (waiting for the next unlock) or
3297 changes to either stay in memory (waiting for the next unlock) or
3298 vanish completely.
3298 vanish completely.
3299 '''
3299 '''
3300 # When using the same lock to commit and strip, the phasecache is left
3300 # When using the same lock to commit and strip, the phasecache is left
3301 # dirty after committing. Then when we strip, the repo is invalidated,
3301 # dirty after committing. Then when we strip, the repo is invalidated,
3302 # causing those changes to disappear.
3302 # causing those changes to disappear.
3303 if '_phasecache' in vars(self):
3303 if '_phasecache' in vars(self):
3304 self._phasecache.write()
3304 self._phasecache.write()
3305
3305
3306 @unfilteredmethod
3306 @unfilteredmethod
3307 def destroyed(self):
3307 def destroyed(self):
3308 '''Inform the repository that nodes have been destroyed.
3308 '''Inform the repository that nodes have been destroyed.
3309 Intended for use by strip and rollback, so there's a common
3309 Intended for use by strip and rollback, so there's a common
3310 place for anything that has to be done after destroying history.
3310 place for anything that has to be done after destroying history.
3311 '''
3311 '''
3312 # When one tries to:
3312 # When one tries to:
3313 # 1) destroy nodes thus calling this method (e.g. strip)
3313 # 1) destroy nodes thus calling this method (e.g. strip)
3314 # 2) use phasecache somewhere (e.g. commit)
3314 # 2) use phasecache somewhere (e.g. commit)
3315 #
3315 #
3316 # then 2) will fail because the phasecache contains nodes that were
3316 # then 2) will fail because the phasecache contains nodes that were
3317 # removed. We can either remove phasecache from the filecache,
3317 # removed. We can either remove phasecache from the filecache,
3318 # causing it to reload next time it is accessed, or simply filter
3318 # causing it to reload next time it is accessed, or simply filter
3319 # the removed nodes now and write the updated cache.
3319 # the removed nodes now and write the updated cache.
3320 self._phasecache.filterunknown(self)
3320 self._phasecache.filterunknown(self)
3321 self._phasecache.write()
3321 self._phasecache.write()
3322
3322
3323 # refresh all repository caches
3323 # refresh all repository caches
3324 self.updatecaches()
3324 self.updatecaches()
3325
3325
3326 # Ensure the persistent tag cache is updated. Doing it now
3326 # Ensure the persistent tag cache is updated. Doing it now
3327 # means that the tag cache only has to worry about destroyed
3327 # means that the tag cache only has to worry about destroyed
3328 # heads immediately after a strip/rollback. That in turn
3328 # heads immediately after a strip/rollback. That in turn
3329 # guarantees that "cachetip == currenttip" (comparing both rev
3329 # guarantees that "cachetip == currenttip" (comparing both rev
3330 # and node) always means no nodes have been added or destroyed.
3330 # and node) always means no nodes have been added or destroyed.
3331
3331
3332 # XXX this is suboptimal when qrefresh'ing: we strip the current
3332 # XXX this is suboptimal when qrefresh'ing: we strip the current
3333 # head, refresh the tag cache, then immediately add a new head.
3333 # head, refresh the tag cache, then immediately add a new head.
3334 # But I think doing it this way is necessary for the "instant
3334 # But I think doing it this way is necessary for the "instant
3335 # tag cache retrieval" case to work.
3335 # tag cache retrieval" case to work.
3336 self.invalidate()
3336 self.invalidate()
3337
3337
3338 def status(
3338 def status(
3339 self,
3339 self,
3340 node1=b'.',
3340 node1=b'.',
3341 node2=None,
3341 node2=None,
3342 match=None,
3342 match=None,
3343 ignored=False,
3343 ignored=False,
3344 clean=False,
3344 clean=False,
3345 unknown=False,
3345 unknown=False,
3346 listsubrepos=False,
3346 listsubrepos=False,
3347 ):
3347 ):
3348 '''a convenience method that calls node1.status(node2)'''
3348 '''a convenience method that calls node1.status(node2)'''
3349 return self[node1].status(
3349 return self[node1].status(
3350 node2, match, ignored, clean, unknown, listsubrepos
3350 node2, match, ignored, clean, unknown, listsubrepos
3351 )
3351 )
3352
3352
3353 def addpostdsstatus(self, ps):
3353 def addpostdsstatus(self, ps):
3354 """Add a callback to run within the wlock, at the point at which status
3354 """Add a callback to run within the wlock, at the point at which status
3355 fixups happen.
3355 fixups happen.
3356
3356
3357 On status completion, callback(wctx, status) will be called with the
3357 On status completion, callback(wctx, status) will be called with the
3358 wlock held, unless the dirstate has changed from underneath or the wlock
3358 wlock held, unless the dirstate has changed from underneath or the wlock
3359 couldn't be grabbed.
3359 couldn't be grabbed.
3360
3360
3361 Callbacks should not capture and use a cached copy of the dirstate --
3361 Callbacks should not capture and use a cached copy of the dirstate --
3362 it might change in the meanwhile. Instead, they should access the
3362 it might change in the meanwhile. Instead, they should access the
3363 dirstate via wctx.repo().dirstate.
3363 dirstate via wctx.repo().dirstate.
3364
3364
3365 This list is emptied out after each status run -- extensions should
3365 This list is emptied out after each status run -- extensions should
3366 make sure they add to this list each time dirstate.status is called.
3366 make sure they add to this list each time dirstate.status is called.
3367 Extensions should also make sure they don't call this for statuses
3367 Extensions should also make sure they don't call this for statuses
3368 that don't involve the dirstate.
3368 that don't involve the dirstate.
3369 """
3369 """
3370
3370
3371 # The list is located here for uniqueness reasons -- it is actually
3371 # The list is located here for uniqueness reasons -- it is actually
3372 # managed by the workingctx, but that isn't unique per-repo.
3372 # managed by the workingctx, but that isn't unique per-repo.
3373 self._postdsstatus.append(ps)
3373 self._postdsstatus.append(ps)
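A hedged usage sketch for the callback protocol described above (the callback name is illustrative): the callback receives (wctx, status) under the wlock and must reach the dirstate through wctx.repo().dirstate rather than a cached reference:

    def _sizefixups(wctx, status):
        dirstate = wctx.repo().dirstate  # never use a cached copy
        for f in status.modified:
            pass  # e.g. refresh cached stat data for f

    repo.addpostdsstatus(_sizefixups)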
3374
3374
3375 def postdsstatus(self):
3375 def postdsstatus(self):
3376 """Used by workingctx to get the list of post-dirstate-status hooks."""
3376 """Used by workingctx to get the list of post-dirstate-status hooks."""
3377 return self._postdsstatus
3377 return self._postdsstatus
3378
3378
3379 def clearpostdsstatus(self):
3379 def clearpostdsstatus(self):
3380 """Used by workingctx to clear post-dirstate-status hooks."""
3380 """Used by workingctx to clear post-dirstate-status hooks."""
3381 del self._postdsstatus[:]
3381 del self._postdsstatus[:]
3382
3382
3383 def heads(self, start=None):
3383 def heads(self, start=None):
3384 if start is None:
3384 if start is None:
3385 cl = self.changelog
3385 cl = self.changelog
3386 headrevs = reversed(cl.headrevs())
3386 headrevs = reversed(cl.headrevs())
3387 return [cl.node(rev) for rev in headrevs]
3387 return [cl.node(rev) for rev in headrevs]
3388
3388
3389 heads = self.changelog.heads(start)
3389 heads = self.changelog.heads(start)
3390 # sort the output in rev descending order
3390 # sort the output in rev descending order
3391 return sorted(heads, key=self.changelog.rev, reverse=True)
3391 return sorted(heads, key=self.changelog.rev, reverse=True)
3392
3392
3393 def branchheads(self, branch=None, start=None, closed=False):
3393 def branchheads(self, branch=None, start=None, closed=False):
3394 '''return a (possibly filtered) list of heads for the given branch
3394 '''return a (possibly filtered) list of heads for the given branch
3395
3395
3396 Heads are returned in topological order, from newest to oldest.
3396 Heads are returned in topological order, from newest to oldest.
3397 If branch is None, use the dirstate branch.
3397 If branch is None, use the dirstate branch.
3398 If start is not None, return only heads reachable from start.
3398 If start is not None, return only heads reachable from start.
3399 If closed is True, return heads that are marked as closed as well.
3399 If closed is True, return heads that are marked as closed as well.
3400 '''
3400 '''
3401 if branch is None:
3401 if branch is None:
3402 branch = self[None].branch()
3402 branch = self[None].branch()
3403 branches = self.branchmap()
3403 branches = self.branchmap()
3404 if not branches.hasbranch(branch):
3404 if not branches.hasbranch(branch):
3405 return []
3405 return []
3406 # the cache returns heads ordered lowest to highest
3406 # the cache returns heads ordered lowest to highest
3407 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3407 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3408 if start is not None:
3408 if start is not None:
3409 # filter out the heads that cannot be reached from startrev
3409 # filter out the heads that cannot be reached from startrev
3410 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3410 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3411 bheads = [h for h in bheads if h in fbheads]
3411 bheads = [h for h in bheads if h in fbheads]
3412 return bheads
3412 return bheads
3413
3413
3414 def branches(self, nodes):
3414 def branches(self, nodes):
3415 if not nodes:
3415 if not nodes:
3416 nodes = [self.changelog.tip()]
3416 nodes = [self.changelog.tip()]
3417 b = []
3417 b = []
3418 for n in nodes:
3418 for n in nodes:
3419 t = n
3419 t = n
3420 while True:
3420 while True:
3421 p = self.changelog.parents(n)
3421 p = self.changelog.parents(n)
3422 if p[1] != nullid or p[0] == nullid:
3422 if p[1] != nullid or p[0] == nullid:
3423 b.append((t, n, p[0], p[1]))
3423 b.append((t, n, p[0], p[1]))
3424 break
3424 break
3425 n = p[0]
3425 n = p[0]
3426 return b
3426 return b
3427
3427
3428 def between(self, pairs):
3428 def between(self, pairs):
3429 r = []
3429 r = []
3430
3430
3431 for top, bottom in pairs:
3431 for top, bottom in pairs:
3432 n, l, i = top, [], 0
3432 n, l, i = top, [], 0
3433 f = 1
3433 f = 1
3434
3434
3435 while n != bottom and n != nullid:
3435 while n != bottom and n != nullid:
3436 p = self.changelog.parents(n)[0]
3436 p = self.changelog.parents(n)[0]
3437 if i == f:
3437 if i == f:
3438 l.append(n)
3438 l.append(n)
3439 f = f * 2
3439 f = f * 2
3440 n = p
3440 n = p
3441 i += 1
3441 i += 1
3442
3442
3443 r.append(l)
3443 r.append(l)
3444
3444
3445 return r
3445 return r
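The doubling of f above samples the first-parent chain at exponentially growing distances, which the legacy wire protocol historically used to narrow down a common point between repositories. A standalone illustration, assuming chain[d] is the node at distance d below "top":

    def sample_exponential(chain):
        # collect the nodes at distances 1, 2, 4, 8, ... exactly as
        # the loop above does
        out, step = [], 1
        for i, node in enumerate(chain):
            if i == step:
                out.append(node)
                step *= 2
        return out

    assert sample_exponential(list(range(10))) == [1, 2, 4, 8]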
3446
3446
3447 def checkpush(self, pushop):
3447 def checkpush(self, pushop):
3448 """Extensions can override this function if additional checks have
3448 """Extensions can override this function if additional checks have
3449 to be performed before pushing, or call it if they override push
3449 to be performed before pushing, or call it if they override push
3450 command.
3450 command.
3451 """
3451 """
3452
3452
3453 @unfilteredpropertycache
3453 @unfilteredpropertycache
3454 def prepushoutgoinghooks(self):
3454 def prepushoutgoinghooks(self):
3455 """Return util.hooks consists of a pushop with repo, remote, outgoing
3455 """Return util.hooks consists of a pushop with repo, remote, outgoing
3456 methods, which are called before pushing changesets.
3456 methods, which are called before pushing changesets.
3457 """
3457 """
3458 return util.hooks()
3458 return util.hooks()
3459
3459
3460 def pushkey(self, namespace, key, old, new):
3460 def pushkey(self, namespace, key, old, new):
3461 try:
3461 try:
3462 tr = self.currenttransaction()
3462 tr = self.currenttransaction()
3463 hookargs = {}
3463 hookargs = {}
3464 if tr is not None:
3464 if tr is not None:
3465 hookargs.update(tr.hookargs)
3465 hookargs.update(tr.hookargs)
3466 hookargs = pycompat.strkwargs(hookargs)
3466 hookargs = pycompat.strkwargs(hookargs)
3467 hookargs['namespace'] = namespace
3467 hookargs['namespace'] = namespace
3468 hookargs['key'] = key
3468 hookargs['key'] = key
3469 hookargs['old'] = old
3469 hookargs['old'] = old
3470 hookargs['new'] = new
3470 hookargs['new'] = new
3471 self.hook(b'prepushkey', throw=True, **hookargs)
3471 self.hook(b'prepushkey', throw=True, **hookargs)
3472 except error.HookAbort as exc:
3472 except error.HookAbort as exc:
3473 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3473 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3474 if exc.hint:
3474 if exc.hint:
3475 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3475 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3476 return False
3476 return False
3477 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3477 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3478 ret = pushkey.push(self, namespace, key, old, new)
3478 ret = pushkey.push(self, namespace, key, old, new)
3479
3479
3480 def runhook(unused_success):
3480 def runhook(unused_success):
3481 self.hook(
3481 self.hook(
3482 b'pushkey',
3482 b'pushkey',
3483 namespace=namespace,
3483 namespace=namespace,
3484 key=key,
3484 key=key,
3485 old=old,
3485 old=old,
3486 new=new,
3486 new=new,
3487 ret=ret,
3487 ret=ret,
3488 )
3488 )
3489
3489
3490 self._afterlock(runhook)
3490 self._afterlock(runhook)
3491 return ret
3491 return ret
3492
3492
3493 def listkeys(self, namespace):
3493 def listkeys(self, namespace):
3494 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3494 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3495 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3495 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3496 values = pushkey.list(self, namespace)
3496 values = pushkey.list(self, namespace)
3497 self.hook(b'listkeys', namespace=namespace, values=values)
3497 self.hook(b'listkeys', namespace=namespace, values=values)
3498 return values
3498 return values
3499
3499
3500 def debugwireargs(self, one, two, three=None, four=None, five=None):
3500 def debugwireargs(self, one, two, three=None, four=None, five=None):
3501 '''used to test argument passing over the wire'''
3501 '''used to test argument passing over the wire'''
3502 return b"%s %s %s %s %s" % (
3502 return b"%s %s %s %s %s" % (
3503 one,
3503 one,
3504 two,
3504 two,
3505 pycompat.bytestr(three),
3505 pycompat.bytestr(three),
3506 pycompat.bytestr(four),
3506 pycompat.bytestr(four),
3507 pycompat.bytestr(five),
3507 pycompat.bytestr(five),
3508 )
3508 )
3509
3509
3510 def savecommitmessage(self, text):
3510 def savecommitmessage(self, text):
3511 fp = self.vfs(b'last-message.txt', b'wb')
3511 fp = self.vfs(b'last-message.txt', b'wb')
3512 try:
3512 try:
3513 fp.write(text)
3513 fp.write(text)
3514 finally:
3514 finally:
3515 fp.close()
3515 fp.close()
3516 return self.pathto(fp.name[len(self.root) + 1 :])
3516 return self.pathto(fp.name[len(self.root) + 1 :])
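A hedged usage sketch: the path returned here is what the abort handler earlier in this file prints in its 'commit message saved in %s' note:

    path = repo.savecommitmessage(b'WIP: draft message\n')
    # path points at .hg/last-message.txt (relative to the cwd) and can
    # be reused with: hg commit --logfile .hg/last-message.txt --edit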
3517
3517
3518
3518
3519 # used to avoid circular references so destructors work
3519 # used to avoid circular references so destructors work
3520 def aftertrans(files):
3520 def aftertrans(files):
3521 renamefiles = [tuple(t) for t in files]
3521 renamefiles = [tuple(t) for t in files]
3522
3522
3523 def a():
3523 def a():
3524 for vfs, src, dest in renamefiles:
3524 for vfs, src, dest in renamefiles:
3525 # if src and dest refer to the same file, vfs.rename is a no-op,
3525 # if src and dest refer to the same file, vfs.rename is a no-op,
3526 # leaving both src and dest on disk. Delete dest to make sure
3526 # leaving both src and dest on disk. Delete dest to make sure
3527 # the rename cannot be such a no-op.
3527 # the rename cannot be such a no-op.
3528 vfs.tryunlink(dest)
3528 vfs.tryunlink(dest)
3529 try:
3529 try:
3530 vfs.rename(src, dest)
3530 vfs.rename(src, dest)
3531 except OSError: # journal file does not yet exist
3531 except OSError: # journal file does not yet exist
3532 pass
3532 pass
3533
3533
3534 return a
3534 return a
3535
3535
3536
3536
3537 def undoname(fn):
3537 def undoname(fn):
3538 base, name = os.path.split(fn)
3538 base, name = os.path.split(fn)
3539 assert name.startswith(b'journal')
3539 assert name.startswith(b'journal')
3540 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3540 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3541
3541
3542
3542
3543 def instance(ui, path, create, intents=None, createopts=None):
3543 def instance(ui, path, create, intents=None, createopts=None):
3544 localpath = util.urllocalpath(path)
3544 localpath = util.urllocalpath(path)
3545 if create:
3545 if create:
3546 createrepository(ui, localpath, createopts=createopts)
3546 createrepository(ui, localpath, createopts=createopts)
3547
3547
3548 return makelocalrepository(ui, localpath, intents=intents)
3548 return makelocalrepository(ui, localpath, intents=intents)
3549
3549
3550
3550
3551 def islocal(path):
3551 def islocal(path):
3552 return True
3552 return True
3553
3553
3554
3554
3555 def defaultcreateopts(ui, createopts=None):
3555 def defaultcreateopts(ui, createopts=None):
3556 """Populate the default creation options for a repository.
3556 """Populate the default creation options for a repository.
3557
3557
3558 A dictionary of explicitly requested creation options can be passed
3558 A dictionary of explicitly requested creation options can be passed
3559 in. Missing keys will be populated.
3559 in. Missing keys will be populated.
3560 """
3560 """
3561 createopts = dict(createopts or {})
3561 createopts = dict(createopts or {})
3562
3562
3563 if b'backend' not in createopts:
3563 if b'backend' not in createopts:
3564 # experimental config: storage.new-repo-backend
3564 # experimental config: storage.new-repo-backend
3565 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3565 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3566
3566
3567 return createopts
3567 return createopts
3568
3568
3569
3569
3570 def newreporequirements(ui, createopts):
3570 def newreporequirements(ui, createopts):
3571 """Determine the set of requirements for a new local repository.
3571 """Determine the set of requirements for a new local repository.
3572
3572
3573 Extensions can wrap this function to specify custom requirements for
3573 Extensions can wrap this function to specify custom requirements for
3574 new repositories.
3574 new repositories.
3575 """
3575 """
3576 # If the repo is being created from a shared repository, we copy
3576 # If the repo is being created from a shared repository, we copy
3577 # its requirements.
3577 # its requirements.
3578 if b'sharedrepo' in createopts:
3578 if b'sharedrepo' in createopts:
3579 requirements = set(createopts[b'sharedrepo'].requirements)
3579 requirements = set(createopts[b'sharedrepo'].requirements)
3580 if createopts.get(b'sharedrelative'):
3580 if createopts.get(b'sharedrelative'):
3581 requirements.add(b'relshared')
3581 requirements.add(b'relshared')
3582 else:
3582 else:
3583 requirements.add(b'shared')
3583 requirements.add(b'shared')
3584
3584
3585 return requirements
3585 return requirements
3586
3586
3587 if b'backend' not in createopts:
3587 if b'backend' not in createopts:
3588 raise error.ProgrammingError(
3588 raise error.ProgrammingError(
3589 b'backend key not present in createopts; '
3589 b'backend key not present in createopts; '
3590 b'was defaultcreateopts() called?'
3590 b'was defaultcreateopts() called?'
3591 )
3591 )
3592
3592
3593 if createopts[b'backend'] != b'revlogv1':
3593 if createopts[b'backend'] != b'revlogv1':
3594 raise error.Abort(
3594 raise error.Abort(
3595 _(
3595 _(
3596 b'unable to determine repository requirements for '
3596 b'unable to determine repository requirements for '
3597 b'storage backend: %s'
3597 b'storage backend: %s'
3598 )
3598 )
3599 % createopts[b'backend']
3599 % createopts[b'backend']
3600 )
3600 )
3601
3601
3602 requirements = {b'revlogv1'}
3602 requirements = {b'revlogv1'}
3603 if ui.configbool(b'format', b'usestore'):
3603 if ui.configbool(b'format', b'usestore'):
3604 requirements.add(b'store')
3604 requirements.add(b'store')
3605 if ui.configbool(b'format', b'usefncache'):
3605 if ui.configbool(b'format', b'usefncache'):
3606 requirements.add(b'fncache')
3606 requirements.add(b'fncache')
3607 if ui.configbool(b'format', b'dotencode'):
3607 if ui.configbool(b'format', b'dotencode'):
3608 requirements.add(b'dotencode')
3608 requirements.add(b'dotencode')
3609
3609
3610 compengines = ui.configlist(b'format', b'revlog-compression')
3610 compengines = ui.configlist(b'format', b'revlog-compression')
3611 for compengine in compengines:
3611 for compengine in compengines:
3612 if compengine in util.compengines:
3612 if compengine in util.compengines:
3613 break
3613 break
3614 else:
3614 else:
3615 raise error.Abort(
3615 raise error.Abort(
3616 _(
3616 _(
3617 b'compression engines %s defined by '
3617 b'compression engines %s defined by '
3618 b'format.revlog-compression not available'
3618 b'format.revlog-compression not available'
3619 )
3619 )
3620 % b', '.join(b'"%s"' % e for e in compengines),
3620 % b', '.join(b'"%s"' % e for e in compengines),
3621 hint=_(
3621 hint=_(
3622 b'run "hg debuginstall" to list available '
3622 b'run "hg debuginstall" to list available '
3623 b'compression engines'
3623 b'compression engines'
3624 ),
3624 ),
3625 )
3625 )
3626
3626
3627 # zlib is the historical default and doesn't need an explicit requirement.
3627 # zlib is the historical default and doesn't need an explicit requirement.
3628 if compengine == b'zstd':
3628 if compengine == b'zstd':
3629 requirements.add(b'revlog-compression-zstd')
3629 requirements.add(b'revlog-compression-zstd')
3630 elif compengine != b'zlib':
3630 elif compengine != b'zlib':
3631 requirements.add(b'exp-compression-%s' % compengine)
3631 requirements.add(b'exp-compression-%s' % compengine)
3632
3632
3633 if scmutil.gdinitconfig(ui):
3633 if scmutil.gdinitconfig(ui):
3634 requirements.add(b'generaldelta')
3634 requirements.add(b'generaldelta')
3635 if ui.configbool(b'format', b'sparse-revlog'):
3635 if ui.configbool(b'format', b'sparse-revlog'):
3636 requirements.add(SPARSEREVLOG_REQUIREMENT)
3636 requirements.add(SPARSEREVLOG_REQUIREMENT)
3637
3637
3638 # experimental config: format.exp-use-side-data
3638 # experimental config: format.exp-use-side-data
3639 if ui.configbool(b'format', b'exp-use-side-data'):
3639 if ui.configbool(b'format', b'exp-use-side-data'):
3640 requirements.add(SIDEDATA_REQUIREMENT)
3640 requirements.add(SIDEDATA_REQUIREMENT)
3641 # experimental config: format.exp-use-copies-side-data-changeset
3641 # experimental config: format.exp-use-copies-side-data-changeset
3642 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3642 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3643 requirements.add(SIDEDATA_REQUIREMENT)
3643 requirements.add(SIDEDATA_REQUIREMENT)
3644 requirements.add(COPIESSDC_REQUIREMENT)
3644 requirements.add(COPIESSDC_REQUIREMENT)
3645 if ui.configbool(b'experimental', b'treemanifest'):
3645 if ui.configbool(b'experimental', b'treemanifest'):
3646 requirements.add(b'treemanifest')
3646 requirements.add(b'treemanifest')
3647
3647
3648 revlogv2 = ui.config(b'experimental', b'revlogv2')
3648 revlogv2 = ui.config(b'experimental', b'revlogv2')
3649 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3649 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3650 requirements.remove(b'revlogv1')
3650 requirements.remove(b'revlogv1')
3651 # generaldelta is implied by revlogv2.
3651 # generaldelta is implied by revlogv2.
3652 requirements.discard(b'generaldelta')
3652 requirements.discard(b'generaldelta')
3653 requirements.add(REVLOGV2_REQUIREMENT)
3653 requirements.add(REVLOGV2_REQUIREMENT)
3654 # experimental config: format.internal-phase
3654 # experimental config: format.internal-phase
3655 if ui.configbool(b'format', b'internal-phase'):
3655 if ui.configbool(b'format', b'internal-phase'):
3656 requirements.add(b'internal-phase')
3656 requirements.add(b'internal-phase')
3657
3657
3658 if createopts.get(b'narrowfiles'):
3658 if createopts.get(b'narrowfiles'):
3659 requirements.add(repository.NARROW_REQUIREMENT)
3659 requirements.add(repository.NARROW_REQUIREMENT)
3660
3660
3661 if createopts.get(b'lfs'):
3661 if createopts.get(b'lfs'):
3662 requirements.add(b'lfs')
3662 requirements.add(b'lfs')
3663
3663
3664 if ui.configbool(b'format', b'bookmarks-in-store'):
3664 if ui.configbool(b'format', b'bookmarks-in-store'):
3665 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3665 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3666
3666
3667 if ui.configbool(b'format', b'use-persistent-nodemap'):
3667 if ui.configbool(b'format', b'use-persistent-nodemap'):
3668 requirements.add(NODEMAP_REQUIREMENT)
3668 requirements.add(NODEMAP_REQUIREMENT)
3669
3669
3670 return requirements
3670 return requirements
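For orientation, a hedged sketch of the result under stock defaults with the revlogv1 backend; the exact contents vary with the Mercurial version and local configuration:

    reqs = newreporequirements(ui, defaultcreateopts(ui))
    # plausibly something like (assumed defaults, not guaranteed):
    # {b'revlogv1', b'store', b'fncache', b'dotencode',
    #  b'generaldelta', b'sparserevlog'}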
3671
3671
3672
3672
3673 def filterknowncreateopts(ui, createopts):
3673 def filterknowncreateopts(ui, createopts):
3674 """Filters a dict of repo creation options against options that are known.
3674 """Filters a dict of repo creation options against options that are known.
3675
3675
3676 Receives a dict of repo creation options and returns a dict of those
3676 Receives a dict of repo creation options and returns a dict of those
3677 options that we don't know how to handle.
3677 options that we don't know how to handle.
3678
3678
3679 This function is called as part of repository creation. If the
3679 This function is called as part of repository creation. If the
3680 returned dict contains any items, repository creation will not
3680 returned dict contains any items, repository creation will not
3681 be allowed, as it means there was a request to create a repository
3681 be allowed, as it means there was a request to create a repository
3682 with options not recognized by loaded code.
3682 with options not recognized by loaded code.
3683
3683
3684 Extensions can wrap this function to filter out creation options
3684 Extensions can wrap this function to filter out creation options
3685 they know how to handle.
3685 they know how to handle.
3686 """
3686 """
3687 known = {
3687 known = {
3688 b'backend',
3688 b'backend',
3689 b'lfs',
3689 b'lfs',
3690 b'narrowfiles',
3690 b'narrowfiles',
3691 b'sharedrepo',
3691 b'sharedrepo',
3692 b'sharedrelative',
3692 b'sharedrelative',
3693 b'shareditems',
3693 b'shareditems',
3694 b'shallowfilestore',
3694 b'shallowfilestore',
3695 }
3695 }
3696
3696
3697 return {k: v for k, v in createopts.items() if k not in known}
3697 return {k: v for k, v in createopts.items() if k not in known}
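A hedged sketch of the extension wrapping described in the docstring; the extension and option names are illustrative:

    from mercurial import extensions, localrepo

    def _filterknown(orig, ui, createopts):
        unknown = orig(ui, createopts)
        unknown.pop(b'myextopt', None)  # claim the option we handle
        return unknown

    def extsetup(ui):
        extensions.wrapfunction(
            localrepo, 'filterknowncreateopts', _filterknown
        )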
3698
3698
3699
3699
3700 def createrepository(ui, path, createopts=None):
3700 def createrepository(ui, path, createopts=None):
3701 """Create a new repository in a vfs.
3701 """Create a new repository in a vfs.
3702
3702
3703 ``path`` path to the new repo's working directory.
3703 ``path`` path to the new repo's working directory.
3704 ``createopts`` options for the new repository.
3704 ``createopts`` options for the new repository.
3705
3705
3706 The following keys for ``createopts`` are recognized:
3706 The following keys for ``createopts`` are recognized:
3707
3707
3708 backend
3708 backend
3709 The storage backend to use.
3709 The storage backend to use.
3710 lfs
3710 lfs
3711 Repository will be created with ``lfs`` requirement. The lfs extension
3711 Repository will be created with ``lfs`` requirement. The lfs extension
3712 will automatically be loaded when the repository is accessed.
3712 will automatically be loaded when the repository is accessed.
3713 narrowfiles
3713 narrowfiles
3714 Set up repository to support narrow file storage.
3714 Set up repository to support narrow file storage.
3715 sharedrepo
3715 sharedrepo
3716 Repository object from which storage should be shared.
3716 Repository object from which storage should be shared.
3717 sharedrelative
3717 sharedrelative
3718 Boolean indicating if the path to the shared repo should be
3718 Boolean indicating if the path to the shared repo should be
3719 stored as relative. By default, the pointer to the "parent" repo
3719 stored as relative. By default, the pointer to the "parent" repo
3720 is stored as an absolute path.
3720 is stored as an absolute path.
3721 shareditems
3721 shareditems
3722 Set of items to share to the new repository (in addition to storage).
3722 Set of items to share to the new repository (in addition to storage).
3723 shallowfilestore
3723 shallowfilestore
3724 Indicates that storage for files should be shallow (not all ancestor
3724 Indicates that storage for files should be shallow (not all ancestor
3725 revisions are known).
3725 revisions are known).
3726 """
3726 """
3727 createopts = defaultcreateopts(ui, createopts=createopts)
3727 createopts = defaultcreateopts(ui, createopts=createopts)
3728
3728
3729 unknownopts = filterknowncreateopts(ui, createopts)
3729 unknownopts = filterknowncreateopts(ui, createopts)
3730
3730
3731 if not isinstance(unknownopts, dict):
3731 if not isinstance(unknownopts, dict):
3732 raise error.ProgrammingError(
3732 raise error.ProgrammingError(
3733 b'filterknowncreateopts() did not return a dict'
3733 b'filterknowncreateopts() did not return a dict'
3734 )
3734 )
3735
3735
3736 if unknownopts:
3736 if unknownopts:
3737 raise error.Abort(
3737 raise error.Abort(
3738 _(
3738 _(
3739 b'unable to create repository because of unknown '
3739 b'unable to create repository because of unknown '
3740 b'creation option: %s'
3740 b'creation option: %s'
3741 )
3741 )
3742 % b', '.join(sorted(unknownopts)),
3742 % b', '.join(sorted(unknownopts)),
3743 hint=_(b'is a required extension not loaded?'),
3743 hint=_(b'is a required extension not loaded?'),
3744 )
3744 )
3745
3745
3746 requirements = newreporequirements(ui, createopts=createopts)
3746 requirements = newreporequirements(ui, createopts=createopts)
3747
3747
3748 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3748 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3749
3749
3750 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3750 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3751 if hgvfs.exists():
3751 if hgvfs.exists():
3752 raise error.RepoError(_(b'repository %s already exists') % path)
3752 raise error.RepoError(_(b'repository %s already exists') % path)
3753
3753
3754 if b'sharedrepo' in createopts:
3754 if b'sharedrepo' in createopts:
3755 sharedpath = createopts[b'sharedrepo'].sharedpath
3755 sharedpath = createopts[b'sharedrepo'].sharedpath
3756
3756
3757 if createopts.get(b'sharedrelative'):
3757 if createopts.get(b'sharedrelative'):
3758 try:
3758 try:
3759 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3759 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3760 except (IOError, ValueError) as e:
3760 except (IOError, ValueError) as e:
3761 # ValueError is raised on Windows if the drive letters differ
3761 # ValueError is raised on Windows if the drive letters differ
3762 # on each path.
3762 # on each path.
3763 raise error.Abort(
3763 raise error.Abort(
3764 _(b'cannot calculate relative path'),
3764 _(b'cannot calculate relative path'),
3765 hint=stringutil.forcebytestr(e),
3765 hint=stringutil.forcebytestr(e),
3766 )
3766 )
3767
3767
3768 if not wdirvfs.exists():
3768 if not wdirvfs.exists():
3769 wdirvfs.makedirs()
3769 wdirvfs.makedirs()
3770
3770
3771 hgvfs.makedir(notindexed=True)
3771 hgvfs.makedir(notindexed=True)
3772 if b'sharedrepo' not in createopts:
3772 if b'sharedrepo' not in createopts:
3773 hgvfs.mkdir(b'cache')
3773 hgvfs.mkdir(b'cache')
3774 hgvfs.mkdir(b'wcache')
3774 hgvfs.mkdir(b'wcache')
3775
3775
3776 if b'store' in requirements and b'sharedrepo' not in createopts:
3776 if b'store' in requirements and b'sharedrepo' not in createopts:
3777 hgvfs.mkdir(b'store')
3777 hgvfs.mkdir(b'store')
3778
3778
3779 # We create an invalid changelog outside the store so very old
3779 # We create an invalid changelog outside the store so very old
3780 # Mercurial versions (which didn't know about the requirements
3780 # Mercurial versions (which didn't know about the requirements
3781 # file) encounter an error on reading the changelog. This
3781 # file) encounter an error on reading the changelog. This
3782 # effectively locks out old clients and prevents them from
3782 # effectively locks out old clients and prevents them from
3783 # mucking with a repo in an unknown format.
3783 # mucking with a repo in an unknown format.
3784 #
3784 #
3785 # The revlog header has version 2, which won't be recognized by
3785 # The revlog header has version 2, which won't be recognized by
3786 # such old clients.
3786 # such old clients.
3787 hgvfs.append(
3787 hgvfs.append(
3788 b'00changelog.i',
3788 b'00changelog.i',
3789 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3789 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3790 b'layout',
3790 b'layout',
3791 )
3791 )
3792
3792
3793 scmutil.writerequires(hgvfs, requirements)
3793 scmutil.writerequires(hgvfs, requirements)
3794
3794
3795 # Write out a file telling readers where to find the shared store.
3795 # Write out a file telling readers where to find the shared store.
3796 if b'sharedrepo' in createopts:
3796 if b'sharedrepo' in createopts:
3797 hgvfs.write(b'sharedpath', sharedpath)
3797 hgvfs.write(b'sharedpath', sharedpath)
3798
3798
3799 if createopts.get(b'shareditems'):
3799 if createopts.get(b'shareditems'):
3800 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3800 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3801 hgvfs.write(b'shared', shared)
3801 hgvfs.write(b'shared', shared)
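A hedged usage sketch tying the documented createopts keys together from a caller's perspective (the path is illustrative):

    from mercurial import hg, localrepo, ui as uimod

    ui = uimod.ui.load()
    localrepo.createrepository(
        ui, b'/tmp/newrepo', createopts={b'narrowfiles': True}
    )
    repo = hg.repository(ui, b'/tmp/newrepo')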
3802
3802
3803
3803
3804 def poisonrepository(repo):
3804 def poisonrepository(repo):
3805 """Poison a repository instance so it can no longer be used."""
3805 """Poison a repository instance so it can no longer be used."""
3806 # Perform any cleanup on the instance.
3806 # Perform any cleanup on the instance.
3807 repo.close()
3807 repo.close()
3808
3808
3809 # Our strategy is to replace the type of the object with one that
3809 # Our strategy is to replace the type of the object with one that
3810 # makes all attribute lookups result in an error.
3810 # makes all attribute lookups result in an error.
3811 #
3811 #
3812 # But we have to allow the close() method because some constructors
3812 # But we have to allow the close() method because some constructors
3813 # of repos call close() on repo references.
3813 # of repos call close() on repo references.
3814 class poisonedrepository(object):
3814 class poisonedrepository(object):
3815 def __getattribute__(self, item):
3815 def __getattribute__(self, item):
3816 if item == 'close':
3816 if item == 'close':
3817 return object.__getattribute__(self, item)
3817 return object.__getattribute__(self, item)
3818
3818
3819 raise error.ProgrammingError(
3819 raise error.ProgrammingError(
3820 b'repo instances should not be used after unshare'
3820 b'repo instances should not be used after unshare'
3821 )
3821 )
3822
3822
3823 def close(self):
3823 def close(self):
3824 pass
3824 pass
3825
3825
3826 # We may have a repoview, which intercepts __setattr__. So be sure
3826 # We may have a repoview, which intercepts __setattr__. So be sure
3827 # we operate at the lowest level possible.
3827 # we operate at the lowest level possible.
3828 object.__setattr__(repo, '__class__', poisonedrepository)
3828 object.__setattr__(repo, '__class__', poisonedrepository)
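The class swap above works because CPython allows reassigning __class__ between compatible heap types, after which all method resolution goes through the new type. A standalone illustration with toy classes:

    class Victim(object):
        def ping(self):
            return 'pong'

    v = Victim()
    object.__setattr__(v, '__class__', type('Poisoned', (object,), {}))
    # v.ping now raises AttributeError: the old methods are unreachable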
@@ -1,429 +1,429 b''
1 ===================================
1 ===================================
2 Test the persistent on-disk nodemap
2 Test the persistent on-disk nodemap
3 ===================================
3 ===================================
4
4
5 $ cat << EOF >> $HGRCPATH
5 $ cat << EOF >> $HGRCPATH
6 > [format]
6 > [format]
7 > use-persistent-nodemap=yes
7 > use-persistent-nodemap=yes
8 > [devel]
8 > [devel]
9 > persistent-nodemap=yes
9 > persistent-nodemap=yes
10 > EOF
10 > EOF
11 $ hg init test-repo
11 $ hg init test-repo
12 $ cd test-repo
12 $ cd test-repo
13 $ hg debugbuilddag .+5000 --new-file --config "experimental.exp-persistent-nodemap.mode=warn"
13 $ hg debugbuilddag .+5000 --new-file --config "storage.revlog.nodemap.mode=warn"
14 persistent nodemap in strict mode without efficient method (no-rust no-pure !)
14 persistent nodemap in strict mode without efficient method (no-rust no-pure !)
15 persistent nodemap in strict mode without efficient method (no-rust no-pure !)
15 persistent nodemap in strict mode without efficient method (no-rust no-pure !)
16 $ hg debugnodemap --metadata
16 $ hg debugnodemap --metadata
17 uid: ???????????????? (glob)
17 uid: ???????????????? (glob)
18 tip-rev: 5000
18 tip-rev: 5000
19 tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
19 tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
20 data-length: 121088
20 data-length: 121088
21 data-unused: 0
21 data-unused: 0
22 data-unused: 0.000%
22 data-unused: 0.000%
23 $ f --size .hg/store/00changelog.n
23 $ f --size .hg/store/00changelog.n
24 .hg/store/00changelog.n: size=70
24 .hg/store/00changelog.n: size=70
25
25
26 Simple lookup works
26 Simple lookup works
27
27
28 $ ANYNODE=`hg log --template '{node|short}\n' --rev tip`
28 $ ANYNODE=`hg log --template '{node|short}\n' --rev tip`
29 $ hg log -r "$ANYNODE" --template '{rev}\n'
29 $ hg log -r "$ANYNODE" --template '{rev}\n'
30 5000
30 5000
31
31
32
32
33 #if rust
33 #if rust
34
34
35 $ f --sha256 .hg/store/00changelog-*.nd
35 $ f --sha256 .hg/store/00changelog-*.nd
36 .hg/store/00changelog-????????????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
36 .hg/store/00changelog-????????????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
37
37
38 $ f --sha256 .hg/store/00manifest-*.nd
38 $ f --sha256 .hg/store/00manifest-*.nd
39 .hg/store/00manifest-????????????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
  .hg/store/00manifest-????????????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
  $ hg debugnodemap --dump-new | f --sha256 --size
  size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
  $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
  size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
  0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........|
  0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."|
  0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^|
  0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....|
  0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8|
  0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i|
  0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....|
  0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........|
  0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....|
  0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................|
  00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+|
  00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................|
  00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5|
  00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U|
  00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................|
  00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................|


#else

  $ f --sha256 .hg/store/00changelog-*.nd
  .hg/store/00changelog-????????????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
  $ hg debugnodemap --dump-new | f --sha256 --size
  size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
  $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
  size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
  0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................|
  0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................|
  0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................|
  0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............|
  0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................|
  00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........|
  00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................|
  00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................|

#endif
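
The dumps above suggest the on-disk layout: the data length (121088) is a
multiple of 64 bytes, consistent with a radix tree serialized as blocks of 16
four-byte big-endian entries, one slot per hex nibble of a node id. A minimal
decoding sketch; how the entry values distinguish "child block" from
"revision" is an assumption, not something this test shows:

    import struct

    BLOCK_SIZE = 16 * 4  # 16 four-byte slots, one per hex nibble

    def iter_blocks(data):
        # yield each block as a tuple of 16 big-endian signed integers
        for offset in range(0, len(data), BLOCK_SIZE):
            yield struct.unpack('>16i', data[offset:offset + BLOCK_SIZE])

    # e.g. feed it the bytes produced by `hg debugnodemap --dump-disk`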

  $ hg debugnodemap --check
  revision in index: 5001
  revision in nodemap: 5001
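
Beyond the two counters printed above, the check presumably has to verify the
mapping itself. A sketch of the invariant, with `index` (revision to node)
and `nodemap` (node to revision) as hypothetical mappings:

    def check_consistency(index, nodemap):
        # both structures must describe the same bijection
        assert len(index) == len(nodemap)
        for rev in range(len(index)):
            assert nodemap[index[rev]] == rev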

add a new commit

  $ hg up
  5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ echo foo > foo
  $ hg add foo

#if no-pure no-rust

  $ hg ci -m 'foo' --config "storage.revlog.nodemap.mode=strict"
  transaction abort!
  rollback completed
  abort: persistent nodemap in strict mode without efficient method
  [255]

#endif
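
The abort above is the point of strict mode: when neither the C nor the Rust
implementation is available and only the slow pure-Python fallback could
maintain the nodemap, the operation is refused instead of silently degrading.
A sketch of such a gate; the `warn` mode name and the `has_fast_path` flag
are assumptions for illustration:

    from mercurial import error

    def check_nodemap_mode(ui, has_fast_path):
        mode = ui.config(b'storage', b'revlog.nodemap.mode')
        if not has_fast_path:
            if mode == b'strict':
                raise error.Abort(
                    b'persistent nodemap in strict mode without '
                    b'efficient method'
                )
            elif mode == b'warn':  # assumed non-strict behaviour
                ui.warn(b'persistent nodemap: no efficient method\n')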

  $ hg ci -m 'foo'

#if no-pure no-rust
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5001
  tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
  data-length: 121088
  data-unused: 0
  data-unused: 0.000%
#else
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5001
  tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
  data-length: 121344
  data-unused: 256
  data-unused: 0.211%
#endif

  $ f --size .hg/store/00changelog.n
  .hg/store/00changelog.n: size=70

(The pure code uses the debug code that performs an incremental update; the C code re-encodes from scratch.)

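The metadata above shows the consequence: the incremental path grows the file
and tracks superseded bytes (data-length: 121344, data-unused: 256), while
re-encoding writes a dense file (data-length: 121088, data-unused: 0). A
sketch of the two strategies, with `serialize_tree`, `build_tree` and the
docket attributes as illustrative names:

    def incremental_update(docket, old_data, new_blocks, superseded):
        # append-only: blocks made obsolete by the update stay in the
        # file and are merely accounted for as unused bytes
        docket.data_length = len(old_data) + len(new_blocks)
        docket.data_unused += superseded
        return old_data + new_blocks

    def full_reencode(docket, index):
        # rebuild the whole tree from the index: no wasted bytes
        data = serialize_tree(build_tree(index))
        docket.data_length = len(data)
        docket.data_unused = 0
        return data
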
#if pure
  $ f --sha256 .hg/store/00changelog-*.nd --size
  .hg/store/00changelog-????????????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
#endif

#if rust
  $ f --sha256 .hg/store/00changelog-*.nd --size
  .hg/store/00changelog-????????????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
#endif

#if no-pure no-rust
  $ f --sha256 .hg/store/00changelog-*.nd --size
  .hg/store/00changelog-????????????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
#endif

  $ hg debugnodemap --check
  revision in index: 5002
  revision in nodemap: 5002

Test code path without mmap
---------------------------

  $ echo bar > bar
  $ hg add bar
  $ hg ci -m 'bar' --config storage.revlog.nodemap.mmap=no

  $ hg debugnodemap --check --config storage.revlog.nodemap.mmap=yes
  revision in index: 5003
  revision in nodemap: 5003
  $ hg debugnodemap --check --config storage.revlog.nodemap.mmap=no
  revision in index: 5003
  revision in nodemap: 5003
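
The mmap option only affects how the nodemap data file is brought into
memory, so both settings must give identical answers, as checked above. A
sketch of the difference using only the standard library:

    import mmap

    def read_nodemap_data(path, with_mmap=True):
        # mmap lets the kernel page the data in lazily; the fallback
        # reads the whole file up front
        with open(path, 'rb') as fh:
            if with_mmap:
                return mmap.mmap(fh.fileno(), 0, access=mmap.ACCESS_READ)
            return fh.read()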


#if pure
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5002
  tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
  data-length: 121600
  data-unused: 512
  data-unused: 0.421%
  $ f --sha256 .hg/store/00changelog-*.nd --size
  .hg/store/00changelog-????????????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
#endif
#if rust
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5002
  tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
  data-length: 121600
  data-unused: 512
  data-unused: 0.421%
  $ f --sha256 .hg/store/00changelog-*.nd --size
  .hg/store/00changelog-????????????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
#endif
#if no-pure no-rust
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5002
  tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
  data-length: 121088
  data-unused: 0
  data-unused: 0.000%
  $ f --sha256 .hg/store/00changelog-*.nd --size
  .hg/store/00changelog-????????????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
#endif

Test force warming the cache

  $ rm .hg/store/00changelog.n
  $ hg debugnodemap --metadata
  $ hg debugupdatecache
#if pure
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5002
  tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
  data-length: 121088
  data-unused: 0
  data-unused: 0.000%
#else
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5002
  tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
  data-length: 121088
  data-unused: 0
  data-unused: 0.000%
#endif
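
Note that both flavours report data-unused: 0 after warming: with the docket
(`00changelog.n`) deleted there is no previous state to update incrementally,
so the cache is rebuilt from the index from scratch. A sketch with an
illustrative rebuild helper:

    import os

    def warm_nodemap(store_path, rebuild_from_index):
        # no docket means no incremental state: do a full rebuild
        docket = os.path.join(store_path, '00changelog.n')
        if not os.path.exists(docket):
            rebuild_from_index()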

Check out of sync nodemap
=========================

First copy old data on the side.

  $ mkdir ../tmp-copies
  $ cp .hg/store/00changelog-????????????????.nd .hg/store/00changelog.n ../tmp-copies

Nodemap lagging behind
----------------------

make a new commit

  $ echo bar2 > bar
  $ hg ci -m 'bar2'
  $ NODE=`hg log -r tip -T '{node}\n'`
  $ hg log -r "$NODE" -T '{rev}\n'
  5003

If the nodemap is lagging behind, it can catch up fine

  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5003
  tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3
  data-length: 121344 (pure !)
  data-length: 121344 (rust !)
  data-length: 121152 (no-rust no-pure !)
  data-unused: 192 (pure !)
  data-unused: 192 (rust !)
  data-unused: 0 (no-rust no-pure !)
  data-unused: 0.158% (pure !)
  data-unused: 0.158% (rust !)
  data-unused: 0.000% (no-rust no-pure !)
  $ cp -f ../tmp-copies/* .hg/store/
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5002
  tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
  data-length: 121088
  data-unused: 0
  data-unused: 0.000%
  $ hg log -r "$NODE" -T '{rev}\n'
  5003
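
Catching up is cheap because the docket records the tip it was written for:
once the index is known to have grown past `tip-rev`, only the newer
revisions need to be inserted. A sketch, with the attribute and helper names
as assumptions:

    def catch_up(docket, index, nodemap):
        # the on-disk data already covers revisions 0..tip_rev; newer
        # revisions from the index are simply added on top
        for rev in range(docket.tip_rev + 1, len(index)):
            nodemap.insert(index[rev], rev)
        docket.tip_rev = len(index) - 1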

changelog altered
-----------------

If the nodemap is not gated behind a requirement, an unaware client can alter
the repository so that the revlog used to generate the nodemap is no longer
compatible with the persistent nodemap. We need to detect that.
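
The `tip-rev`/`tip-node` pair in the docket gives a cheap validity check: if
the index no longer has the recorded node at the recorded revision (as after
the strip below), the on-disk nodemap must be discarded and rebuilt. A sketch
mirroring the metadata fields shown in this test:

    def nodemap_still_valid(docket, index):
        # a stripped or rewritten changelog no longer carries the
        # recorded node at the recorded revision
        if docket.tip_rev >= len(index):
            return False
        return index[docket.tip_rev] == docket.tip_node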

  $ hg up "$NODE~5"
  0 files updated, 0 files merged, 4 files removed, 0 files unresolved
  $ echo bar > babar
  $ hg add babar
  $ hg ci -m 'babar'
  created new head
  $ OTHERNODE=`hg log -r tip -T '{node}\n'`
  $ hg log -r "$OTHERNODE" -T '{rev}\n'
  5004

  $ hg --config extensions.strip= strip --rev "$NODE~1" --no-backup

The nodemap should detect that the changelog has been tampered with and recover.

  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5002
  tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
  data-length: 121536 (pure !)
  data-length: 121088 (rust !)
  data-length: 121088 (no-pure no-rust !)
  data-unused: 448 (pure !)
  data-unused: 0 (rust !)
  data-unused: 0 (no-pure no-rust !)
  data-unused: 0.000% (rust !)
  data-unused: 0.369% (pure !)
  data-unused: 0.000% (no-pure no-rust !)

  $ cp -f ../tmp-copies/* .hg/store/
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5002
  tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
  data-length: 121088
  data-unused: 0
  data-unused: 0.000%
  $ hg log -r "$OTHERNODE" -T '{rev}\n'
  5002

Check transaction related property
==================================

An up-to-date nodemap should be available to shell hooks.

  $ echo dsljfl > a
  $ hg add a
  $ hg ci -m a
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5003
  tip-node: a52c5079765b5865d97b993b303a18740113bbb2
  data-length: 121088
  data-unused: 0
  data-unused: 0.000%
  $ echo babar2 > babar
  $ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
  uid: ???????????????? (glob)
  tip-rev: 5004
  tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
  data-length: 121280 (pure !)
  data-length: 121280 (rust !)
  data-length: 121088 (no-pure no-rust !)
  data-unused: 192 (pure !)
  data-unused: 192 (rust !)
  data-unused: 0 (no-pure no-rust !)
  data-unused: 0.158% (pure !)
  data-unused: 0.158% (rust !)
  data-unused: 0.000% (no-pure no-rust !)
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5004
  tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
  data-length: 121280 (pure !)
  data-length: 121280 (rust !)
  data-length: 121088 (no-pure no-rust !)
  data-unused: 192 (pure !)
  data-unused: 192 (rust !)
  data-unused: 0 (no-pure no-rust !)
  data-unused: 0.158% (pure !)
  data-unused: 0.158% (rust !)
  data-unused: 0.000% (no-pure no-rust !)

Another process does not see the pending nodemap content during the run.

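Mercurial normally isolates in-progress transaction state by writing
"pending" variants of the affected files, which only processes opted in
through the HG_PENDING mechanism (such as hooks) will read. Whether the
nodemap docket follows the usual `.a` suffix convention is an assumption
here:

    import os

    def docket_path(store_path, allow_pending):
        # ordinary readers never see the pending docket; hooks spawned
        # by the transaction (HG_PENDING set) do
        if allow_pending and os.environ.get('HG_PENDING'):
            pending = os.path.join(store_path, '00changelog.n.a')
            if os.path.exists(pending):
                return pending
        return os.path.join(store_path, '00changelog.n')
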
362 $ PATH=$RUNTESTDIR/testlib/:$PATH
362 $ PATH=$RUNTESTDIR/testlib/:$PATH
363 $ echo qpoasp > a
363 $ echo qpoasp > a
364 $ hg ci -m a2 \
364 $ hg ci -m a2 \
365 > --config "hooks.pretxnclose=wait-on-file 20 sync-repo-read sync-txn-pending" \
365 > --config "hooks.pretxnclose=wait-on-file 20 sync-repo-read sync-txn-pending" \
366 > --config "hooks.txnclose=touch sync-txn-close" > output.txt 2>&1 &
366 > --config "hooks.txnclose=touch sync-txn-close" > output.txt 2>&1 &
367
367
368 (read the repository while the commit transaction is pending)
368 (read the repository while the commit transaction is pending)
369
369
370 $ wait-on-file 20 sync-txn-pending && \
370 $ wait-on-file 20 sync-txn-pending && \
371 > hg debugnodemap --metadata && \
371 > hg debugnodemap --metadata && \
372 > wait-on-file 20 sync-txn-close sync-repo-read
372 > wait-on-file 20 sync-txn-close sync-repo-read
373 uid: ???????????????? (glob)
373 uid: ???????????????? (glob)
374 tip-rev: 5004
374 tip-rev: 5004
375 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
375 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
376 data-length: 121280 (pure !)
376 data-length: 121280 (pure !)
377 data-length: 121280 (rust !)
377 data-length: 121280 (rust !)
378 data-length: 121088 (no-pure no-rust !)
378 data-length: 121088 (no-pure no-rust !)
379 data-unused: 192 (pure !)
379 data-unused: 192 (pure !)
380 data-unused: 192 (rust !)
380 data-unused: 192 (rust !)
381 data-unused: 0 (no-pure no-rust !)
381 data-unused: 0 (no-pure no-rust !)
382 data-unused: 0.158% (pure !)
382 data-unused: 0.158% (pure !)
383 data-unused: 0.158% (rust !)
383 data-unused: 0.158% (rust !)
384 data-unused: 0.000% (no-pure no-rust !)
384 data-unused: 0.000% (no-pure no-rust !)
385 $ hg debugnodemap --metadata
385 $ hg debugnodemap --metadata
386 uid: ???????????????? (glob)
386 uid: ???????????????? (glob)
387 tip-rev: 5005
387 tip-rev: 5005
388 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
388 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
389 data-length: 121536 (pure !)
389 data-length: 121536 (pure !)
390 data-length: 121536 (rust !)
390 data-length: 121536 (rust !)
391 data-length: 121088 (no-pure no-rust !)
391 data-length: 121088 (no-pure no-rust !)
392 data-unused: 448 (pure !)
392 data-unused: 448 (pure !)
393 data-unused: 448 (rust !)
393 data-unused: 448 (rust !)
394 data-unused: 0 (no-pure no-rust !)
394 data-unused: 0 (no-pure no-rust !)
395 data-unused: 0.369% (pure !)
395 data-unused: 0.369% (pure !)
396 data-unused: 0.369% (rust !)
396 data-unused: 0.369% (rust !)
397 data-unused: 0.000% (no-pure no-rust !)
397 data-unused: 0.000% (no-pure no-rust !)
398
398
399 $ cat output.txt
399 $ cat output.txt
400
400
401 Check that a failing transaction will properly revert the data
401 Check that a failing transaction will properly revert the data
402
402
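Reverting on abort falls out of the transaction machinery once the nodemap
files are registered with it; `addbackup` asks the transaction to snapshot a
file so a rollback can restore it. A sketch of the registration (how the real
code wires this up is not shown by this test):

    def register_nodemap(tr):
        # on abort the transaction restores the backed-up docket, which
        # is presumably enough to hide any bytes appended to the data
        # file past the old data-length
        tr.addbackup(b'00changelog.n', location=b'store')
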
  $ echo plakfe > a
  $ f --size --sha256 .hg/store/00changelog-*.nd
  .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
  .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
  .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
  $ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py"
  transaction abort!
  rollback completed
  abort: This is a late abort
  [255]
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5005
  tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
  data-length: 121536 (pure !)
  data-length: 121536 (rust !)
  data-length: 121088 (no-pure no-rust !)
  data-unused: 448 (pure !)
  data-unused: 448 (rust !)
  data-unused: 0 (no-pure no-rust !)
  data-unused: 0.369% (pure !)
  data-unused: 0.369% (rust !)
  data-unused: 0.000% (no-pure no-rust !)
  $ f --size --sha256 .hg/store/00changelog-*.nd
  .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
  .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
  .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)