# configitems.py - centralized declaration of configuration option
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
7
7
from __future__ import absolute_import

import functools
import re

from . import (
    encoding,
    error,
)
17
17
18
18
def loadconfigtable(ui, extname, configtable):
    """update config item known to the ui with the extension ones"""
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        # an extension re-registering an already known item is suspicious:
        # surface it as a devel warning before the override happens
        for key in sorted(knownkeys & newkeys):
            msg = b"extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config=b'warn-config')

        knownitems.update(items)
31
31
32
32
class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, match name using regular expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        # copy so callers can safely pass any iterable (including tuples)
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        self._re = None
        if generic:
            # for generic items the name IS the pattern; pre-compile it once
            self._re = re.compile(self.name)
63
63
64
64
65 class itemregister(dict):
65 class itemregister(dict):
66 """A specialized dictionary that can handle wild-card selection"""
66 """A specialized dictionary that can handle wild-card selection"""
67
67
68 def __init__(self):
68 def __init__(self):
69 super(itemregister, self).__init__()
69 super(itemregister, self).__init__()
70 self._generics = set()
70 self._generics = set()
71
71
72 def update(self, other):
72 def update(self, other):
73 super(itemregister, self).update(other)
73 super(itemregister, self).update(other)
74 self._generics.update(other._generics)
74 self._generics.update(other._generics)
75
75
76 def __setitem__(self, key, item):
76 def __setitem__(self, key, item):
77 super(itemregister, self).__setitem__(key, item)
77 super(itemregister, self).__setitem__(key, item)
78 if item.generic:
78 if item.generic:
79 self._generics.add(item)
79 self._generics.add(item)
80
80
81 def get(self, key):
81 def get(self, key):
82 baseitem = super(itemregister, self).get(key)
82 baseitem = super(itemregister, self).get(key)
83 if baseitem is not None and not baseitem.generic:
83 if baseitem is not None and not baseitem.generic:
84 return baseitem
84 return baseitem
85
85
86 # search for a matching generic item
86 # search for a matching generic item
87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
88 for item in generics:
88 for item in generics:
89 # we use 'match' instead of 'search' to make the matching simpler
89 # we use 'match' instead of 'search' to make the matching simpler
90 # for people unfamiliar with regular expression. Having the match
90 # for people unfamiliar with regular expression. Having the match
91 # rooted to the start of the string will produce less surprising
91 # rooted to the start of the string will produce less surprising
92 # result for user writing simple regex for sub-attribute.
92 # result for user writing simple regex for sub-attribute.
93 #
93 #
94 # For example using "color\..*" match produces an unsurprising
94 # For example using "color\..*" match produces an unsurprising
95 # result, while using search could suddenly match apparently
95 # result, while using search could suddenly match apparently
96 # unrelated configuration that happens to contains "color."
96 # unrelated configuration that happens to contains "color."
97 # anywhere. This is a tradeoff where we favor requiring ".*" on
97 # anywhere. This is a tradeoff where we favor requiring ".*" on
98 # some match to avoid the need to prefix most pattern with "^".
98 # some match to avoid the need to prefix most pattern with "^".
99 # The "^" seems more error prone.
99 # The "^" seems more error prone.
100 if item._re.match(key):
100 if item._re.match(key):
101 return item
101 return item
102
102
103 return None
103 return None
104
104
105
105
# mapping of section -> itemregister for all core (non-extension) items
coreitems = {}
107
107
108
108
def _register(configtable, *args, **kwargs):
    """build a configitem from the arguments and record it in ``configtable``

    Raises ``error.ProgrammingError`` if the same ``section.name`` pair is
    registered twice.
    """
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = b"duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item
116
116
117
117
# special value for case where the default is derived from other values
dynamicdefault = object()
120
120
# Registering actual config items
122
122
123
123
def getitemregister(configtable):
    """return a registration function bound to ``configtable``"""
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f
129
129
130
130
# registration function for Mercurial's own (core) config items
coreconfigitem = getitemregister(coreitems)
132
132
133
133
def _registerdiffopts(section, configprefix=b''):
    """register the standard set of diff options under ``section``

    Each option name is prefixed with ``configprefix``; all options default
    to False except ``unified`` which defaults to None.
    """
    for name, default in [
        (b'nodates', False),
        (b'showfunc', False),
        (b'unified', None),
        (b'git', False),
        (b'ignorews', False),
        (b'ignorewsamount', False),
        (b'ignoreblanklines', False),
        (b'ignorewseol', False),
        (b'nobinary', False),
        (b'noprefix', False),
        (b'word-diff', False),
    ]:
        coreconfigitem(
            section, configprefix + name, default=default,
        )
168
168
169
169
coreconfigitem(
    b'alias', b'.*', default=dynamicdefault, generic=True,
)
coreconfigitem(
    b'auth', b'cookiefile', default=None,
)
_registerdiffopts(section=b'annotate')
# bookmarks.pushing: internal hack for discovery
coreconfigitem(
    b'bookmarks', b'pushing', default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem(
    b'bundle', b'mainreporoot', default=b'',
)
coreconfigitem(
    b'censor', b'policy', default=b'abort', experimental=True,
)
coreconfigitem(
    b'chgserver', b'idletimeout', default=3600,
)
coreconfigitem(
    b'chgserver', b'skiphash', default=False,
)
coreconfigitem(
    b'cmdserver', b'log', default=None,
)
coreconfigitem(
    b'cmdserver', b'max-log-files', default=7,
)
coreconfigitem(
    b'cmdserver', b'max-log-size', default=b'1 MB',
)
coreconfigitem(
    b'cmdserver', b'max-repo-cache', default=0, experimental=True,
)
coreconfigitem(
    b'cmdserver', b'message-encodings', default=list, experimental=True,
)
coreconfigitem(
    b'cmdserver',
    b'track-log',
    default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
)
coreconfigitem(
    b'color', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'color', b'mode', default=b'auto',
)
coreconfigitem(
    b'color', b'pagermode', default=dynamicdefault,
)
_registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
coreconfigitem(
    b'commands', b'commit.post-status', default=False,
)
coreconfigitem(
    b'commands', b'grep.all-files', default=False, experimental=True,
)
coreconfigitem(
    b'commands', b'push.require-revs', default=False,
)
coreconfigitem(
    b'commands', b'resolve.confirm', default=False,
)
coreconfigitem(
    b'commands', b'resolve.explicit-re-merge', default=False,
)
coreconfigitem(
    b'commands', b'resolve.mark-check', default=b'none',
)
_registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
coreconfigitem(
    b'commands', b'show.aliasprefix', default=list,
)
coreconfigitem(
    b'commands', b'status.relative', default=False,
)
coreconfigitem(
    b'commands', b'status.skipstates', default=[], experimental=True,
)
coreconfigitem(
    b'commands', b'status.terse', default=b'',
)
coreconfigitem(
    b'commands', b'status.verbose', default=False,
)
coreconfigitem(
    b'commands', b'update.check', default=None,
)
coreconfigitem(
    b'commands', b'update.requiredest', default=False,
)
coreconfigitem(
    b'committemplate', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'convert', b'bzr.saverev', default=True,
)
coreconfigitem(
    b'convert', b'cvsps.cache', default=True,
)
coreconfigitem(
    b'convert', b'cvsps.fuzz', default=60,
)
coreconfigitem(
    b'convert', b'cvsps.logencoding', default=None,
)
coreconfigitem(
    b'convert', b'cvsps.mergefrom', default=None,
)
coreconfigitem(
    b'convert', b'cvsps.mergeto', default=None,
)
coreconfigitem(
    b'convert', b'git.committeractions', default=lambda: [b'messagedifferent'],
)
coreconfigitem(
    b'convert', b'git.extrakeys', default=list,
)
coreconfigitem(
    b'convert', b'git.findcopiesharder', default=False,
)
coreconfigitem(
    b'convert', b'git.remoteprefix', default=b'remote',
)
coreconfigitem(
    b'convert', b'git.renamelimit', default=400,
)
coreconfigitem(
    b'convert', b'git.saverev', default=True,
)
coreconfigitem(
    b'convert', b'git.similarity', default=50,
)
coreconfigitem(
    b'convert', b'git.skipsubmodules', default=False,
)
coreconfigitem(
    b'convert', b'hg.clonebranches', default=False,
)
coreconfigitem(
    b'convert', b'hg.ignoreerrors', default=False,
)
coreconfigitem(
    b'convert', b'hg.preserve-hash', default=False,
)
coreconfigitem(
    b'convert', b'hg.revs', default=None,
)
coreconfigitem(
    b'convert', b'hg.saverev', default=False,
)
coreconfigitem(
    b'convert', b'hg.sourcename', default=None,
)
coreconfigitem(
    b'convert', b'hg.startrev', default=None,
)
coreconfigitem(
    b'convert', b'hg.tagsbranch', default=b'default',
)
coreconfigitem(
    b'convert', b'hg.usebranchnames', default=True,
)
coreconfigitem(
    b'convert', b'ignoreancestorcheck', default=False, experimental=True,
)
coreconfigitem(
    b'convert', b'localtimezone', default=False,
)
coreconfigitem(
    b'convert', b'p4.encoding', default=dynamicdefault,
)
coreconfigitem(
    b'convert', b'p4.startrev', default=0,
)
coreconfigitem(
    b'convert', b'skiptags', default=False,
)
coreconfigitem(
    b'convert', b'svn.debugsvnlog', default=True,
)
coreconfigitem(
    b'convert', b'svn.trunk', default=None,
)
coreconfigitem(
    b'convert', b'svn.tags', default=None,
)
coreconfigitem(
    b'convert', b'svn.branches', default=None,
)
coreconfigitem(
    b'convert', b'svn.startrev', default=0,
)
coreconfigitem(
    b'debug', b'dirstate.delaywrite', default=0,
)
coreconfigitem(
    b'defaults', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'devel', b'all-warnings', default=False,
)
coreconfigitem(
    b'devel', b'bundle2.debug', default=False,
)
coreconfigitem(
    b'devel', b'bundle.delta', default=b'',
)
coreconfigitem(
    b'devel', b'cache-vfs', default=None,
)
coreconfigitem(
    b'devel', b'check-locks', default=False,
)
coreconfigitem(
    b'devel', b'check-relroot', default=False,
)
coreconfigitem(
    b'devel', b'default-date', default=None,
)
coreconfigitem(
    b'devel', b'deprec-warn', default=False,
)
coreconfigitem(
    b'devel', b'disableloaddefaultcerts', default=False,
)
coreconfigitem(
    b'devel', b'warn-empty-changegroup', default=False,
)
coreconfigitem(
    b'devel', b'legacy.exchange', default=list,
)
coreconfigitem(
    b'devel', b'servercafile', default=b'',
)
coreconfigitem(
    b'devel', b'serverexactprotocol', default=b'',
)
coreconfigitem(
    b'devel', b'serverrequirecert', default=False,
)
coreconfigitem(
    b'devel', b'strip-obsmarkers', default=True,
)
coreconfigitem(
    b'devel', b'warn-config', default=None,
)
coreconfigitem(
    b'devel', b'warn-config-default', default=None,
)
coreconfigitem(
    b'devel', b'user.obsmarker', default=None,
)
coreconfigitem(
    b'devel', b'warn-config-unknown', default=None,
)
coreconfigitem(
    b'devel', b'debug.copies', default=False,
)
coreconfigitem(
    b'devel', b'debug.extensions', default=False,
)
coreconfigitem(
    b'devel', b'debug.peer-request', default=False,
)
coreconfigitem(
    b'devel', b'discovery.randomize', default=True,
)
_registerdiffopts(section=b'diff')
coreconfigitem(
    b'email', b'bcc', default=None,
)
coreconfigitem(
    b'email', b'cc', default=None,
)
coreconfigitem(
    b'email', b'charsets', default=list,
)
coreconfigitem(
    b'email', b'from', default=None,
)
coreconfigitem(
    b'email', b'method', default=b'smtp',
)
coreconfigitem(
    b'email', b'reply-to', default=None,
)
coreconfigitem(
    b'email', b'to', default=None,
)
coreconfigitem(
    b'experimental', b'archivemetatemplate', default=dynamicdefault,
)
coreconfigitem(
    b'experimental', b'auto-publish', default=b'publish',
)
coreconfigitem(
    b'experimental', b'bundle-phases', default=False,
)
coreconfigitem(
    b'experimental', b'bundle2-advertise', default=True,
)
coreconfigitem(
    b'experimental', b'bundle2-output-capture', default=False,
)
coreconfigitem(
    b'experimental', b'bundle2.pushback', default=False,
)
coreconfigitem(
    b'experimental', b'bundle2lazylocking', default=False,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.bzip2', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.gzip', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.none', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.zstd', default=None,
)
coreconfigitem(
    b'experimental', b'changegroup3', default=False,
)
coreconfigitem(
    b'experimental', b'cleanup-as-archived', default=False,
)
coreconfigitem(
    b'experimental', b'clientcompressionengines', default=list,
)
coreconfigitem(
    b'experimental', b'copytrace', default=b'on',
)
coreconfigitem(
    b'experimental', b'copytrace.movecandidateslimit', default=100,
)
coreconfigitem(
    b'experimental', b'copytrace.sourcecommitlimit', default=100,
)
coreconfigitem(
    b'experimental', b'copies.read-from', default=b"filelog-only",
)
coreconfigitem(
    b'experimental', b'copies.write-to', default=b'filelog-only',
)
coreconfigitem(
    b'experimental', b'crecordtest', default=None,
)
coreconfigitem(
    b'experimental', b'directaccess', default=False,
)
coreconfigitem(
    b'experimental', b'directaccess.revnums', default=False,
)
coreconfigitem(
    b'experimental', b'editortmpinhg', default=False,
)
coreconfigitem(
    b'experimental', b'evolution', default=list,
)
coreconfigitem(
    b'experimental',
    b'evolution.allowdivergence',
    default=False,
    alias=[(b'experimental', b'allowdivergence')],
)
coreconfigitem(
    b'experimental', b'evolution.allowunstable', default=None,
)
coreconfigitem(
    b'experimental', b'evolution.createmarkers', default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.effect-flags',
    default=True,
    alias=[(b'experimental', b'effect-flags')],
)
coreconfigitem(
    b'experimental', b'evolution.exchange', default=None,
)
coreconfigitem(
    b'experimental', b'evolution.bundle-obsmarker', default=False,
)
coreconfigitem(
    b'experimental', b'log.topo', default=False,
)
coreconfigitem(
    b'experimental', b'evolution.report-instabilities', default=True,
)
coreconfigitem(
    b'experimental', b'evolution.track-operation', default=True,
)
# repo-level config to exclude a revset visibility
#
# The target use case is to use `share` to expose different subset of the same
# repository, especially server side. See also `server.view`.
coreconfigitem(
    b'experimental', b'extra-filter-revs', default=None,
)
coreconfigitem(
    b'experimental', b'maxdeltachainspan', default=-1,
)
coreconfigitem(
    b'experimental', b'mergetempdirprefix', default=None,
)
coreconfigitem(
    b'experimental', b'mmapindexthreshold', default=None,
)
coreconfigitem(
    b'experimental', b'narrow', default=False,
)
coreconfigitem(
    b'experimental', b'nonnormalparanoidcheck', default=False,
)
coreconfigitem(
    b'experimental', b'exportableenviron', default=list,
)
coreconfigitem(
    b'experimental', b'extendedheader.index', default=None,
)
coreconfigitem(
    b'experimental', b'extendedheader.similarity', default=False,
)
coreconfigitem(
    b'experimental', b'graphshorten', default=False,
)
coreconfigitem(
    b'experimental', b'graphstyle.parent', default=dynamicdefault,
)
coreconfigitem(
    b'experimental', b'graphstyle.missing', default=dynamicdefault,
)
coreconfigitem(
    b'experimental', b'graphstyle.grandparent', default=dynamicdefault,
)
614 coreconfigitem(
614 coreconfigitem(
615 b'experimental', b'hook-track-tags', default=False,
615 b'experimental', b'hook-track-tags', default=False,
616 )
616 )
617 coreconfigitem(
617 coreconfigitem(
618 b'experimental', b'httppeer.advertise-v2', default=False,
618 b'experimental', b'httppeer.advertise-v2', default=False,
619 )
619 )
620 coreconfigitem(
620 coreconfigitem(
621 b'experimental', b'httppeer.v2-encoder-order', default=None,
621 b'experimental', b'httppeer.v2-encoder-order', default=None,
622 )
622 )
623 coreconfigitem(
623 coreconfigitem(
624 b'experimental', b'httppostargs', default=False,
624 b'experimental', b'httppostargs', default=False,
625 )
625 )
626 coreconfigitem(
626 coreconfigitem(
627 b'experimental', b'mergedriver', default=None,
627 b'experimental', b'mergedriver', default=None,
628 )
628 )
629 coreconfigitem(b'experimental', b'nointerrupt', default=False)
629 coreconfigitem(b'experimental', b'nointerrupt', default=False)
630 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
630 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
631
631
632 coreconfigitem(
632 coreconfigitem(
633 b'experimental', b'obsmarkers-exchange-debug', default=False,
633 b'experimental', b'obsmarkers-exchange-debug', default=False,
634 )
634 )
635 coreconfigitem(
635 coreconfigitem(
636 b'experimental', b'remotenames', default=False,
636 b'experimental', b'remotenames', default=False,
637 )
637 )
638 coreconfigitem(
638 coreconfigitem(
639 b'experimental', b'removeemptydirs', default=True,
639 b'experimental', b'removeemptydirs', default=True,
640 )
640 )
641 coreconfigitem(
641 coreconfigitem(
642 b'experimental', b'revert.interactive.select-to-keep', default=False,
642 b'experimental', b'revert.interactive.select-to-keep', default=False,
643 )
643 )
644 coreconfigitem(
644 coreconfigitem(
645 b'experimental', b'revisions.prefixhexnode', default=False,
645 b'experimental', b'revisions.prefixhexnode', default=False,
646 )
646 )
647 coreconfigitem(
647 coreconfigitem(
648 b'experimental', b'revlogv2', default=None,
648 b'experimental', b'revlogv2', default=None,
649 )
649 )
650 coreconfigitem(
650 coreconfigitem(
651 b'experimental', b'revisions.disambiguatewithin', default=None,
651 b'experimental', b'revisions.disambiguatewithin', default=None,
652 )
652 )
653 coreconfigitem(
653 coreconfigitem(
654 b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
654 b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
655 )
655 )
656 coreconfigitem(
656 coreconfigitem(
657 b'experimental',
657 b'experimental',
658 b'server.manifestdata.recommended-batch-size',
658 b'server.manifestdata.recommended-batch-size',
659 default=100000,
659 default=100000,
660 )
660 )
661 coreconfigitem(
661 coreconfigitem(
662 b'experimental', b'server.stream-narrow-clones', default=False,
662 b'experimental', b'server.stream-narrow-clones', default=False,
663 )
663 )
664 coreconfigitem(
664 coreconfigitem(
665 b'experimental', b'single-head-per-branch', default=False,
665 b'experimental', b'single-head-per-branch', default=False,
666 )
666 )
667 coreconfigitem(
667 coreconfigitem(
668 b'experimental',
668 b'experimental',
669 b'single-head-per-branch:account-closed-heads',
669 b'single-head-per-branch:account-closed-heads',
670 default=False,
670 default=False,
671 )
671 )
672 coreconfigitem(
672 coreconfigitem(
673 b'experimental', b'sshserver.support-v2', default=False,
673 b'experimental', b'sshserver.support-v2', default=False,
674 )
674 )
675 coreconfigitem(
675 coreconfigitem(
676 b'experimental', b'sparse-read', default=False,
676 b'experimental', b'sparse-read', default=False,
677 )
677 )
678 coreconfigitem(
678 coreconfigitem(
679 b'experimental', b'sparse-read.density-threshold', default=0.50,
679 b'experimental', b'sparse-read.density-threshold', default=0.50,
680 )
680 )
681 coreconfigitem(
681 coreconfigitem(
682 b'experimental', b'sparse-read.min-gap-size', default=b'65K',
682 b'experimental', b'sparse-read.min-gap-size', default=b'65K',
683 )
683 )
684 coreconfigitem(
684 coreconfigitem(
685 b'experimental', b'treemanifest', default=False,
685 b'experimental', b'treemanifest', default=False,
686 )
686 )
687 coreconfigitem(
687 coreconfigitem(
688 b'experimental', b'update.atomic-file', default=False,
688 b'experimental', b'update.atomic-file', default=False,
689 )
689 )
690 coreconfigitem(
690 coreconfigitem(
691 b'experimental', b'sshpeer.advertise-v2', default=False,
691 b'experimental', b'sshpeer.advertise-v2', default=False,
692 )
692 )
693 coreconfigitem(
693 coreconfigitem(
694 b'experimental', b'web.apiserver', default=False,
694 b'experimental', b'web.apiserver', default=False,
695 )
695 )
696 coreconfigitem(
696 coreconfigitem(
697 b'experimental', b'web.api.http-v2', default=False,
697 b'experimental', b'web.api.http-v2', default=False,
698 )
698 )
699 coreconfigitem(
699 coreconfigitem(
700 b'experimental', b'web.api.debugreflect', default=False,
700 b'experimental', b'web.api.debugreflect', default=False,
701 )
701 )
702 coreconfigitem(
702 coreconfigitem(
703 b'experimental', b'worker.wdir-get-thread-safe', default=False,
703 b'experimental', b'worker.wdir-get-thread-safe', default=False,
704 )
704 )
705 coreconfigitem(
705 coreconfigitem(
706 b'experimental', b'xdiff', default=False,
706 b'experimental', b'xdiff', default=False,
707 )
707 )
708 coreconfigitem(
708 coreconfigitem(
709 b'extensions', b'.*', default=None, generic=True,
709 b'extensions', b'.*', default=None, generic=True,
710 )
710 )
711 coreconfigitem(
711 coreconfigitem(
712 b'extdata', b'.*', default=None, generic=True,
712 b'extdata', b'.*', default=None, generic=True,
713 )
713 )
714 coreconfigitem(
714 coreconfigitem(
715 b'format', b'bookmarks-in-store', default=False,
715 b'format', b'bookmarks-in-store', default=False,
716 )
716 )
717 coreconfigitem(
717 coreconfigitem(
718 b'format', b'chunkcachesize', default=None, experimental=True,
718 b'format', b'chunkcachesize', default=None, experimental=True,
719 )
719 )
720 coreconfigitem(
720 coreconfigitem(
721 b'format', b'dotencode', default=True,
721 b'format', b'dotencode', default=True,
722 )
722 )
723 coreconfigitem(
723 coreconfigitem(
724 b'format', b'generaldelta', default=False, experimental=True,
724 b'format', b'generaldelta', default=False, experimental=True,
725 )
725 )
726 coreconfigitem(
726 coreconfigitem(
727 b'format', b'manifestcachesize', default=None, experimental=True,
727 b'format', b'manifestcachesize', default=None, experimental=True,
728 )
728 )
729 coreconfigitem(
729 coreconfigitem(
730 b'format', b'maxchainlen', default=dynamicdefault, experimental=True,
730 b'format', b'maxchainlen', default=dynamicdefault, experimental=True,
731 )
731 )
732 coreconfigitem(
732 coreconfigitem(
733 b'format', b'obsstore-version', default=None,
733 b'format', b'obsstore-version', default=None,
734 )
734 )
735 coreconfigitem(
735 coreconfigitem(
736 b'format', b'sparse-revlog', default=True,
736 b'format', b'sparse-revlog', default=True,
737 )
737 )
738 coreconfigitem(
738 coreconfigitem(
739 b'format',
739 b'format',
740 b'revlog-compression',
740 b'revlog-compression',
741 default=b'zlib',
741 default=b'zlib',
742 alias=[(b'experimental', b'format.compression')],
742 alias=[(b'experimental', b'format.compression')],
743 )
743 )
744 coreconfigitem(
744 coreconfigitem(
745 b'format', b'usefncache', default=True,
745 b'format', b'usefncache', default=True,
746 )
746 )
747 coreconfigitem(
747 coreconfigitem(
748 b'format', b'usegeneraldelta', default=True,
748 b'format', b'usegeneraldelta', default=True,
749 )
749 )
750 coreconfigitem(
750 coreconfigitem(
751 b'format', b'usestore', default=True,
751 b'format', b'usestore', default=True,
752 )
752 )
753 coreconfigitem(
753 coreconfigitem(
754 b'format',
754 b'format',
755 b'exp-use-copies-side-data-changeset',
755 b'exp-use-copies-side-data-changeset',
756 default=False,
756 default=False,
757 experimental=True,
757 experimental=True,
758 )
758 )
759 coreconfigitem(
759 coreconfigitem(
760 b'format', b'use-side-data', default=False, experimental=True,
760 b'format', b'exp-use-side-data', default=False, experimental=True,
761 )
761 )
762 coreconfigitem(
762 coreconfigitem(
763 b'format', b'internal-phase', default=False, experimental=True,
763 b'format', b'internal-phase', default=False, experimental=True,
764 )
764 )
765 coreconfigitem(
765 coreconfigitem(
766 b'fsmonitor', b'warn_when_unused', default=True,
766 b'fsmonitor', b'warn_when_unused', default=True,
767 )
767 )
768 coreconfigitem(
768 coreconfigitem(
769 b'fsmonitor', b'warn_update_file_count', default=50000,
769 b'fsmonitor', b'warn_update_file_count', default=50000,
770 )
770 )
771 coreconfigitem(
771 coreconfigitem(
772 b'help', br'hidden-command\..*', default=False, generic=True,
772 b'help', br'hidden-command\..*', default=False, generic=True,
773 )
773 )
774 coreconfigitem(
774 coreconfigitem(
775 b'help', br'hidden-topic\..*', default=False, generic=True,
775 b'help', br'hidden-topic\..*', default=False, generic=True,
776 )
776 )
777 coreconfigitem(
777 coreconfigitem(
778 b'hooks', b'.*', default=dynamicdefault, generic=True,
778 b'hooks', b'.*', default=dynamicdefault, generic=True,
779 )
779 )
780 coreconfigitem(
780 coreconfigitem(
781 b'hgweb-paths', b'.*', default=list, generic=True,
781 b'hgweb-paths', b'.*', default=list, generic=True,
782 )
782 )
783 coreconfigitem(
783 coreconfigitem(
784 b'hostfingerprints', b'.*', default=list, generic=True,
784 b'hostfingerprints', b'.*', default=list, generic=True,
785 )
785 )
786 coreconfigitem(
786 coreconfigitem(
787 b'hostsecurity', b'ciphers', default=None,
787 b'hostsecurity', b'ciphers', default=None,
788 )
788 )
789 coreconfigitem(
789 coreconfigitem(
790 b'hostsecurity', b'disabletls10warning', default=False,
790 b'hostsecurity', b'disabletls10warning', default=False,
791 )
791 )
792 coreconfigitem(
792 coreconfigitem(
793 b'hostsecurity', b'minimumprotocol', default=dynamicdefault,
793 b'hostsecurity', b'minimumprotocol', default=dynamicdefault,
794 )
794 )
795 coreconfigitem(
795 coreconfigitem(
796 b'hostsecurity',
796 b'hostsecurity',
797 b'.*:minimumprotocol$',
797 b'.*:minimumprotocol$',
798 default=dynamicdefault,
798 default=dynamicdefault,
799 generic=True,
799 generic=True,
800 )
800 )
801 coreconfigitem(
801 coreconfigitem(
802 b'hostsecurity', b'.*:ciphers$', default=dynamicdefault, generic=True,
802 b'hostsecurity', b'.*:ciphers$', default=dynamicdefault, generic=True,
803 )
803 )
804 coreconfigitem(
804 coreconfigitem(
805 b'hostsecurity', b'.*:fingerprints$', default=list, generic=True,
805 b'hostsecurity', b'.*:fingerprints$', default=list, generic=True,
806 )
806 )
807 coreconfigitem(
807 coreconfigitem(
808 b'hostsecurity', b'.*:verifycertsfile$', default=None, generic=True,
808 b'hostsecurity', b'.*:verifycertsfile$', default=None, generic=True,
809 )
809 )
810
810
811 coreconfigitem(
811 coreconfigitem(
812 b'http_proxy', b'always', default=False,
812 b'http_proxy', b'always', default=False,
813 )
813 )
814 coreconfigitem(
814 coreconfigitem(
815 b'http_proxy', b'host', default=None,
815 b'http_proxy', b'host', default=None,
816 )
816 )
817 coreconfigitem(
817 coreconfigitem(
818 b'http_proxy', b'no', default=list,
818 b'http_proxy', b'no', default=list,
819 )
819 )
820 coreconfigitem(
820 coreconfigitem(
821 b'http_proxy', b'passwd', default=None,
821 b'http_proxy', b'passwd', default=None,
822 )
822 )
823 coreconfigitem(
823 coreconfigitem(
824 b'http_proxy', b'user', default=None,
824 b'http_proxy', b'user', default=None,
825 )
825 )
826
826
827 coreconfigitem(
827 coreconfigitem(
828 b'http', b'timeout', default=None,
828 b'http', b'timeout', default=None,
829 )
829 )
830
830
831 coreconfigitem(
831 coreconfigitem(
832 b'logtoprocess', b'commandexception', default=None,
832 b'logtoprocess', b'commandexception', default=None,
833 )
833 )
834 coreconfigitem(
834 coreconfigitem(
835 b'logtoprocess', b'commandfinish', default=None,
835 b'logtoprocess', b'commandfinish', default=None,
836 )
836 )
837 coreconfigitem(
837 coreconfigitem(
838 b'logtoprocess', b'command', default=None,
838 b'logtoprocess', b'command', default=None,
839 )
839 )
840 coreconfigitem(
840 coreconfigitem(
841 b'logtoprocess', b'develwarn', default=None,
841 b'logtoprocess', b'develwarn', default=None,
842 )
842 )
843 coreconfigitem(
843 coreconfigitem(
844 b'logtoprocess', b'uiblocked', default=None,
844 b'logtoprocess', b'uiblocked', default=None,
845 )
845 )
846 coreconfigitem(
846 coreconfigitem(
847 b'merge', b'checkunknown', default=b'abort',
847 b'merge', b'checkunknown', default=b'abort',
848 )
848 )
849 coreconfigitem(
849 coreconfigitem(
850 b'merge', b'checkignored', default=b'abort',
850 b'merge', b'checkignored', default=b'abort',
851 )
851 )
852 coreconfigitem(
852 coreconfigitem(
853 b'experimental', b'merge.checkpathconflicts', default=False,
853 b'experimental', b'merge.checkpathconflicts', default=False,
854 )
854 )
855 coreconfigitem(
855 coreconfigitem(
856 b'merge', b'followcopies', default=True,
856 b'merge', b'followcopies', default=True,
857 )
857 )
858 coreconfigitem(
858 coreconfigitem(
859 b'merge', b'on-failure', default=b'continue',
859 b'merge', b'on-failure', default=b'continue',
860 )
860 )
861 coreconfigitem(
861 coreconfigitem(
862 b'merge', b'preferancestor', default=lambda: [b'*'], experimental=True,
862 b'merge', b'preferancestor', default=lambda: [b'*'], experimental=True,
863 )
863 )
864 coreconfigitem(
864 coreconfigitem(
865 b'merge', b'strict-capability-check', default=False,
865 b'merge', b'strict-capability-check', default=False,
866 )
866 )
867 coreconfigitem(
867 coreconfigitem(
868 b'merge-tools', b'.*', default=None, generic=True,
868 b'merge-tools', b'.*', default=None, generic=True,
869 )
869 )
870 coreconfigitem(
870 coreconfigitem(
871 b'merge-tools',
871 b'merge-tools',
872 br'.*\.args$',
872 br'.*\.args$',
873 default=b"$local $base $other",
873 default=b"$local $base $other",
874 generic=True,
874 generic=True,
875 priority=-1,
875 priority=-1,
876 )
876 )
877 coreconfigitem(
877 coreconfigitem(
878 b'merge-tools', br'.*\.binary$', default=False, generic=True, priority=-1,
878 b'merge-tools', br'.*\.binary$', default=False, generic=True, priority=-1,
879 )
879 )
880 coreconfigitem(
880 coreconfigitem(
881 b'merge-tools', br'.*\.check$', default=list, generic=True, priority=-1,
881 b'merge-tools', br'.*\.check$', default=list, generic=True, priority=-1,
882 )
882 )
883 coreconfigitem(
883 coreconfigitem(
884 b'merge-tools',
884 b'merge-tools',
885 br'.*\.checkchanged$',
885 br'.*\.checkchanged$',
886 default=False,
886 default=False,
887 generic=True,
887 generic=True,
888 priority=-1,
888 priority=-1,
889 )
889 )
890 coreconfigitem(
890 coreconfigitem(
891 b'merge-tools',
891 b'merge-tools',
892 br'.*\.executable$',
892 br'.*\.executable$',
893 default=dynamicdefault,
893 default=dynamicdefault,
894 generic=True,
894 generic=True,
895 priority=-1,
895 priority=-1,
896 )
896 )
897 coreconfigitem(
897 coreconfigitem(
898 b'merge-tools', br'.*\.fixeol$', default=False, generic=True, priority=-1,
898 b'merge-tools', br'.*\.fixeol$', default=False, generic=True, priority=-1,
899 )
899 )
900 coreconfigitem(
900 coreconfigitem(
901 b'merge-tools', br'.*\.gui$', default=False, generic=True, priority=-1,
901 b'merge-tools', br'.*\.gui$', default=False, generic=True, priority=-1,
902 )
902 )
903 coreconfigitem(
903 coreconfigitem(
904 b'merge-tools',
904 b'merge-tools',
905 br'.*\.mergemarkers$',
905 br'.*\.mergemarkers$',
906 default=b'basic',
906 default=b'basic',
907 generic=True,
907 generic=True,
908 priority=-1,
908 priority=-1,
909 )
909 )
910 coreconfigitem(
910 coreconfigitem(
911 b'merge-tools',
911 b'merge-tools',
912 br'.*\.mergemarkertemplate$',
912 br'.*\.mergemarkertemplate$',
913 default=dynamicdefault, # take from ui.mergemarkertemplate
913 default=dynamicdefault, # take from ui.mergemarkertemplate
914 generic=True,
914 generic=True,
915 priority=-1,
915 priority=-1,
916 )
916 )
917 coreconfigitem(
917 coreconfigitem(
918 b'merge-tools', br'.*\.priority$', default=0, generic=True, priority=-1,
918 b'merge-tools', br'.*\.priority$', default=0, generic=True, priority=-1,
919 )
919 )
920 coreconfigitem(
920 coreconfigitem(
921 b'merge-tools',
921 b'merge-tools',
922 br'.*\.premerge$',
922 br'.*\.premerge$',
923 default=dynamicdefault,
923 default=dynamicdefault,
924 generic=True,
924 generic=True,
925 priority=-1,
925 priority=-1,
926 )
926 )
927 coreconfigitem(
927 coreconfigitem(
928 b'merge-tools', br'.*\.symlink$', default=False, generic=True, priority=-1,
928 b'merge-tools', br'.*\.symlink$', default=False, generic=True, priority=-1,
929 )
929 )
930 coreconfigitem(
930 coreconfigitem(
931 b'pager', b'attend-.*', default=dynamicdefault, generic=True,
931 b'pager', b'attend-.*', default=dynamicdefault, generic=True,
932 )
932 )
933 coreconfigitem(
933 coreconfigitem(
934 b'pager', b'ignore', default=list,
934 b'pager', b'ignore', default=list,
935 )
935 )
936 coreconfigitem(
936 coreconfigitem(
937 b'pager', b'pager', default=dynamicdefault,
937 b'pager', b'pager', default=dynamicdefault,
938 )
938 )
939 coreconfigitem(
939 coreconfigitem(
940 b'patch', b'eol', default=b'strict',
940 b'patch', b'eol', default=b'strict',
941 )
941 )
942 coreconfigitem(
942 coreconfigitem(
943 b'patch', b'fuzz', default=2,
943 b'patch', b'fuzz', default=2,
944 )
944 )
945 coreconfigitem(
945 coreconfigitem(
946 b'paths', b'default', default=None,
946 b'paths', b'default', default=None,
947 )
947 )
948 coreconfigitem(
948 coreconfigitem(
949 b'paths', b'default-push', default=None,
949 b'paths', b'default-push', default=None,
950 )
950 )
951 coreconfigitem(
951 coreconfigitem(
952 b'paths', b'.*', default=None, generic=True,
952 b'paths', b'.*', default=None, generic=True,
953 )
953 )
954 coreconfigitem(
954 coreconfigitem(
955 b'phases', b'checksubrepos', default=b'follow',
955 b'phases', b'checksubrepos', default=b'follow',
956 )
956 )
957 coreconfigitem(
957 coreconfigitem(
958 b'phases', b'new-commit', default=b'draft',
958 b'phases', b'new-commit', default=b'draft',
959 )
959 )
960 coreconfigitem(
960 coreconfigitem(
961 b'phases', b'publish', default=True,
961 b'phases', b'publish', default=True,
962 )
962 )
963 coreconfigitem(
963 coreconfigitem(
964 b'profiling', b'enabled', default=False,
964 b'profiling', b'enabled', default=False,
965 )
965 )
966 coreconfigitem(
966 coreconfigitem(
967 b'profiling', b'format', default=b'text',
967 b'profiling', b'format', default=b'text',
968 )
968 )
969 coreconfigitem(
969 coreconfigitem(
970 b'profiling', b'freq', default=1000,
970 b'profiling', b'freq', default=1000,
971 )
971 )
972 coreconfigitem(
972 coreconfigitem(
973 b'profiling', b'limit', default=30,
973 b'profiling', b'limit', default=30,
974 )
974 )
975 coreconfigitem(
975 coreconfigitem(
976 b'profiling', b'nested', default=0,
976 b'profiling', b'nested', default=0,
977 )
977 )
978 coreconfigitem(
978 coreconfigitem(
979 b'profiling', b'output', default=None,
979 b'profiling', b'output', default=None,
980 )
980 )
981 coreconfigitem(
981 coreconfigitem(
982 b'profiling', b'showmax', default=0.999,
982 b'profiling', b'showmax', default=0.999,
983 )
983 )
984 coreconfigitem(
984 coreconfigitem(
985 b'profiling', b'showmin', default=dynamicdefault,
985 b'profiling', b'showmin', default=dynamicdefault,
986 )
986 )
987 coreconfigitem(
987 coreconfigitem(
988 b'profiling', b'showtime', default=True,
988 b'profiling', b'showtime', default=True,
989 )
989 )
990 coreconfigitem(
990 coreconfigitem(
991 b'profiling', b'sort', default=b'inlinetime',
991 b'profiling', b'sort', default=b'inlinetime',
992 )
992 )
993 coreconfigitem(
993 coreconfigitem(
994 b'profiling', b'statformat', default=b'hotpath',
994 b'profiling', b'statformat', default=b'hotpath',
995 )
995 )
996 coreconfigitem(
996 coreconfigitem(
997 b'profiling', b'time-track', default=dynamicdefault,
997 b'profiling', b'time-track', default=dynamicdefault,
998 )
998 )
999 coreconfigitem(
999 coreconfigitem(
1000 b'profiling', b'type', default=b'stat',
1000 b'profiling', b'type', default=b'stat',
1001 )
1001 )
1002 coreconfigitem(
1002 coreconfigitem(
1003 b'progress', b'assume-tty', default=False,
1003 b'progress', b'assume-tty', default=False,
1004 )
1004 )
1005 coreconfigitem(
1005 coreconfigitem(
1006 b'progress', b'changedelay', default=1,
1006 b'progress', b'changedelay', default=1,
1007 )
1007 )
1008 coreconfigitem(
1008 coreconfigitem(
1009 b'progress', b'clear-complete', default=True,
1009 b'progress', b'clear-complete', default=True,
1010 )
1010 )
1011 coreconfigitem(
1011 coreconfigitem(
1012 b'progress', b'debug', default=False,
1012 b'progress', b'debug', default=False,
1013 )
1013 )
1014 coreconfigitem(
1014 coreconfigitem(
1015 b'progress', b'delay', default=3,
1015 b'progress', b'delay', default=3,
1016 )
1016 )
1017 coreconfigitem(
1017 coreconfigitem(
1018 b'progress', b'disable', default=False,
1018 b'progress', b'disable', default=False,
1019 )
1019 )
1020 coreconfigitem(
1020 coreconfigitem(
1021 b'progress', b'estimateinterval', default=60.0,
1021 b'progress', b'estimateinterval', default=60.0,
1022 )
1022 )
1023 coreconfigitem(
1023 coreconfigitem(
1024 b'progress',
1024 b'progress',
1025 b'format',
1025 b'format',
1026 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1026 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1027 )
1027 )
1028 coreconfigitem(
1028 coreconfigitem(
1029 b'progress', b'refresh', default=0.1,
1029 b'progress', b'refresh', default=0.1,
1030 )
1030 )
1031 coreconfigitem(
1031 coreconfigitem(
1032 b'progress', b'width', default=dynamicdefault,
1032 b'progress', b'width', default=dynamicdefault,
1033 )
1033 )
1034 coreconfigitem(
1034 coreconfigitem(
1035 b'push', b'pushvars.server', default=False,
1035 b'push', b'pushvars.server', default=False,
1036 )
1036 )
1037 coreconfigitem(
1037 coreconfigitem(
1038 b'rewrite',
1038 b'rewrite',
1039 b'backup-bundle',
1039 b'backup-bundle',
1040 default=True,
1040 default=True,
1041 alias=[(b'ui', b'history-editing-backup')],
1041 alias=[(b'ui', b'history-editing-backup')],
1042 )
1042 )
1043 coreconfigitem(
1043 coreconfigitem(
1044 b'rewrite', b'update-timestamp', default=False,
1044 b'rewrite', b'update-timestamp', default=False,
1045 )
1045 )
1046 coreconfigitem(
1046 coreconfigitem(
1047 b'storage', b'new-repo-backend', default=b'revlogv1', experimental=True,
1047 b'storage', b'new-repo-backend', default=b'revlogv1', experimental=True,
1048 )
1048 )
1049 coreconfigitem(
1049 coreconfigitem(
1050 b'storage',
1050 b'storage',
1051 b'revlog.optimize-delta-parent-choice',
1051 b'revlog.optimize-delta-parent-choice',
1052 default=True,
1052 default=True,
1053 alias=[(b'format', b'aggressivemergedeltas')],
1053 alias=[(b'format', b'aggressivemergedeltas')],
1054 )
1054 )
1055 coreconfigitem(
1055 coreconfigitem(
1056 b'storage', b'revlog.reuse-external-delta', default=True,
1056 b'storage', b'revlog.reuse-external-delta', default=True,
1057 )
1057 )
1058 coreconfigitem(
1058 coreconfigitem(
1059 b'storage', b'revlog.reuse-external-delta-parent', default=None,
1059 b'storage', b'revlog.reuse-external-delta-parent', default=None,
1060 )
1060 )
1061 coreconfigitem(
1061 coreconfigitem(
1062 b'storage', b'revlog.zlib.level', default=None,
1062 b'storage', b'revlog.zlib.level', default=None,
1063 )
1063 )
1064 coreconfigitem(
1064 coreconfigitem(
1065 b'storage', b'revlog.zstd.level', default=None,
1065 b'storage', b'revlog.zstd.level', default=None,
1066 )
1066 )
1067 coreconfigitem(
1067 coreconfigitem(
1068 b'server', b'bookmarks-pushkey-compat', default=True,
1068 b'server', b'bookmarks-pushkey-compat', default=True,
1069 )
1069 )
1070 coreconfigitem(
1070 coreconfigitem(
1071 b'server', b'bundle1', default=True,
1071 b'server', b'bundle1', default=True,
1072 )
1072 )
1073 coreconfigitem(
1073 coreconfigitem(
1074 b'server', b'bundle1gd', default=None,
1074 b'server', b'bundle1gd', default=None,
1075 )
1075 )
1076 coreconfigitem(
1076 coreconfigitem(
1077 b'server', b'bundle1.pull', default=None,
1077 b'server', b'bundle1.pull', default=None,
1078 )
1078 )
1079 coreconfigitem(
1079 coreconfigitem(
1080 b'server', b'bundle1gd.pull', default=None,
1080 b'server', b'bundle1gd.pull', default=None,
1081 )
1081 )
1082 coreconfigitem(
1082 coreconfigitem(
1083 b'server', b'bundle1.push', default=None,
1083 b'server', b'bundle1.push', default=None,
1084 )
1084 )
1085 coreconfigitem(
1085 coreconfigitem(
1086 b'server', b'bundle1gd.push', default=None,
1086 b'server', b'bundle1gd.push', default=None,
1087 )
1087 )
1088 coreconfigitem(
1088 coreconfigitem(
1089 b'server',
1089 b'server',
1090 b'bundle2.stream',
1090 b'bundle2.stream',
1091 default=True,
1091 default=True,
1092 alias=[(b'experimental', b'bundle2.stream')],
1092 alias=[(b'experimental', b'bundle2.stream')],
1093 )
1093 )
1094 coreconfigitem(
1094 coreconfigitem(
1095 b'server', b'compressionengines', default=list,
1095 b'server', b'compressionengines', default=list,
1096 )
1096 )
1097 coreconfigitem(
1097 coreconfigitem(
1098 b'server', b'concurrent-push-mode', default=b'strict',
1098 b'server', b'concurrent-push-mode', default=b'strict',
1099 )
1099 )
1100 coreconfigitem(
1100 coreconfigitem(
1101 b'server', b'disablefullbundle', default=False,
1101 b'server', b'disablefullbundle', default=False,
1102 )
1102 )
1103 coreconfigitem(
1103 coreconfigitem(
1104 b'server', b'maxhttpheaderlen', default=1024,
1104 b'server', b'maxhttpheaderlen', default=1024,
1105 )
1105 )
1106 coreconfigitem(
1106 coreconfigitem(
1107 b'server', b'pullbundle', default=False,
1107 b'server', b'pullbundle', default=False,
1108 )
1108 )
1109 coreconfigitem(
1109 coreconfigitem(
1110 b'server', b'preferuncompressed', default=False,
1110 b'server', b'preferuncompressed', default=False,
1111 )
1111 )
1112 coreconfigitem(
1112 coreconfigitem(
1113 b'server', b'streamunbundle', default=False,
1113 b'server', b'streamunbundle', default=False,
1114 )
1114 )
1115 coreconfigitem(
1115 coreconfigitem(
1116 b'server', b'uncompressed', default=True,
1116 b'server', b'uncompressed', default=True,
1117 )
1117 )
1118 coreconfigitem(
1118 coreconfigitem(
1119 b'server', b'uncompressedallowsecret', default=False,
1119 b'server', b'uncompressedallowsecret', default=False,
1120 )
1120 )
1121 coreconfigitem(
1121 coreconfigitem(
1122 b'server', b'view', default=b'served',
1122 b'server', b'view', default=b'served',
1123 )
1123 )
1124 coreconfigitem(
1124 coreconfigitem(
1125 b'server', b'validate', default=False,
1125 b'server', b'validate', default=False,
1126 )
1126 )
1127 coreconfigitem(
1127 coreconfigitem(
1128 b'server', b'zliblevel', default=-1,
1128 b'server', b'zliblevel', default=-1,
1129 )
1129 )
1130 coreconfigitem(
1130 coreconfigitem(
1131 b'server', b'zstdlevel', default=3,
1131 b'server', b'zstdlevel', default=3,
1132 )
1132 )
1133 coreconfigitem(
1133 coreconfigitem(
1134 b'share', b'pool', default=None,
1134 b'share', b'pool', default=None,
1135 )
1135 )
1136 coreconfigitem(
1136 coreconfigitem(
1137 b'share', b'poolnaming', default=b'identity',
1137 b'share', b'poolnaming', default=b'identity',
1138 )
1138 )
1139 coreconfigitem(
1139 coreconfigitem(
1140 b'shelve', b'maxbackups', default=10,
1140 b'shelve', b'maxbackups', default=10,
1141 )
1141 )
1142 coreconfigitem(
1142 coreconfigitem(
1143 b'smtp', b'host', default=None,
1143 b'smtp', b'host', default=None,
1144 )
1144 )
1145 coreconfigitem(
1145 coreconfigitem(
1146 b'smtp', b'local_hostname', default=None,
1146 b'smtp', b'local_hostname', default=None,
1147 )
1147 )
1148 coreconfigitem(
1148 coreconfigitem(
1149 b'smtp', b'password', default=None,
1149 b'smtp', b'password', default=None,
1150 )
1150 )
1151 coreconfigitem(
1151 coreconfigitem(
1152 b'smtp', b'port', default=dynamicdefault,
1152 b'smtp', b'port', default=dynamicdefault,
1153 )
1153 )
1154 coreconfigitem(
1154 coreconfigitem(
1155 b'smtp', b'tls', default=b'none',
1155 b'smtp', b'tls', default=b'none',
1156 )
1156 )
1157 coreconfigitem(
1157 coreconfigitem(
1158 b'smtp', b'username', default=None,
1158 b'smtp', b'username', default=None,
1159 )
1159 )
1160 coreconfigitem(
1160 coreconfigitem(
1161 b'sparse', b'missingwarning', default=True, experimental=True,
1161 b'sparse', b'missingwarning', default=True, experimental=True,
1162 )
1162 )
1163 coreconfigitem(
1163 coreconfigitem(
1164 b'subrepos',
1164 b'subrepos',
1165 b'allowed',
1165 b'allowed',
1166 default=dynamicdefault, # to make backporting simpler
1166 default=dynamicdefault, # to make backporting simpler
1167 )
1167 )
1168 coreconfigitem(
1168 coreconfigitem(
1169 b'subrepos', b'hg:allowed', default=dynamicdefault,
1169 b'subrepos', b'hg:allowed', default=dynamicdefault,
1170 )
1170 )
1171 coreconfigitem(
1171 coreconfigitem(
1172 b'subrepos', b'git:allowed', default=dynamicdefault,
1172 b'subrepos', b'git:allowed', default=dynamicdefault,
1173 )
1173 )
1174 coreconfigitem(
1174 coreconfigitem(
1175 b'subrepos', b'svn:allowed', default=dynamicdefault,
1175 b'subrepos', b'svn:allowed', default=dynamicdefault,
1176 )
1176 )
1177 coreconfigitem(
1177 coreconfigitem(
1178 b'templates', b'.*', default=None, generic=True,
1178 b'templates', b'.*', default=None, generic=True,
1179 )
1179 )
1180 coreconfigitem(
1180 coreconfigitem(
1181 b'templateconfig', b'.*', default=dynamicdefault, generic=True,
1181 b'templateconfig', b'.*', default=dynamicdefault, generic=True,
1182 )
1182 )
1183 coreconfigitem(
1183 coreconfigitem(
1184 b'trusted', b'groups', default=list,
1184 b'trusted', b'groups', default=list,
1185 )
1185 )
1186 coreconfigitem(
1186 coreconfigitem(
1187 b'trusted', b'users', default=list,
1187 b'trusted', b'users', default=list,
1188 )
1188 )
1189 coreconfigitem(
1189 coreconfigitem(
1190 b'ui', b'_usedassubrepo', default=False,
1190 b'ui', b'_usedassubrepo', default=False,
1191 )
1191 )
1192 coreconfigitem(
1192 coreconfigitem(
1193 b'ui', b'allowemptycommit', default=False,
1193 b'ui', b'allowemptycommit', default=False,
1194 )
1194 )
1195 coreconfigitem(
1195 coreconfigitem(
1196 b'ui', b'archivemeta', default=True,
1196 b'ui', b'archivemeta', default=True,
1197 )
1197 )
1198 coreconfigitem(
1198 coreconfigitem(
1199 b'ui', b'askusername', default=False,
1199 b'ui', b'askusername', default=False,
1200 )
1200 )
1201 coreconfigitem(
1201 coreconfigitem(
1202 b'ui', b'clonebundlefallback', default=False,
1202 b'ui', b'clonebundlefallback', default=False,
1203 )
1203 )
1204 coreconfigitem(
1204 coreconfigitem(
1205 b'ui', b'clonebundleprefers', default=list,
1205 b'ui', b'clonebundleprefers', default=list,
1206 )
1206 )
1207 coreconfigitem(
1207 coreconfigitem(
1208 b'ui', b'clonebundles', default=True,
1208 b'ui', b'clonebundles', default=True,
1209 )
1209 )
1210 coreconfigitem(
1210 coreconfigitem(
1211 b'ui', b'color', default=b'auto',
1211 b'ui', b'color', default=b'auto',
1212 )
1212 )
1213 coreconfigitem(
1213 coreconfigitem(
1214 b'ui', b'commitsubrepos', default=False,
1214 b'ui', b'commitsubrepos', default=False,
1215 )
1215 )
1216 coreconfigitem(
1216 coreconfigitem(
1217 b'ui', b'debug', default=False,
1217 b'ui', b'debug', default=False,
1218 )
1218 )
1219 coreconfigitem(
1219 coreconfigitem(
1220 b'ui', b'debugger', default=None,
1220 b'ui', b'debugger', default=None,
1221 )
1221 )
1222 coreconfigitem(
1222 coreconfigitem(
1223 b'ui', b'editor', default=dynamicdefault,
1223 b'ui', b'editor', default=dynamicdefault,
1224 )
1224 )
1225 coreconfigitem(
1225 coreconfigitem(
1226 b'ui', b'fallbackencoding', default=None,
1226 b'ui', b'fallbackencoding', default=None,
1227 )
1227 )
1228 coreconfigitem(
1228 coreconfigitem(
1229 b'ui', b'forcecwd', default=None,
1229 b'ui', b'forcecwd', default=None,
1230 )
1230 )
1231 coreconfigitem(
1231 coreconfigitem(
1232 b'ui', b'forcemerge', default=None,
1232 b'ui', b'forcemerge', default=None,
1233 )
1233 )
1234 coreconfigitem(
1234 coreconfigitem(
1235 b'ui', b'formatdebug', default=False,
1235 b'ui', b'formatdebug', default=False,
1236 )
1236 )
1237 coreconfigitem(
1237 coreconfigitem(
1238 b'ui', b'formatjson', default=False,
1238 b'ui', b'formatjson', default=False,
1239 )
1239 )
1240 coreconfigitem(
1240 coreconfigitem(
1241 b'ui', b'formatted', default=None,
1241 b'ui', b'formatted', default=None,
1242 )
1242 )
1243 coreconfigitem(
1243 coreconfigitem(
1244 b'ui', b'graphnodetemplate', default=None,
1244 b'ui', b'graphnodetemplate', default=None,
1245 )
1245 )
1246 coreconfigitem(
1246 coreconfigitem(
1247 b'ui', b'interactive', default=None,
1247 b'ui', b'interactive', default=None,
1248 )
1248 )
1249 coreconfigitem(
1249 coreconfigitem(
1250 b'ui', b'interface', default=None,
1250 b'ui', b'interface', default=None,
1251 )
1251 )
1252 coreconfigitem(
1252 coreconfigitem(
1253 b'ui', b'interface.chunkselector', default=None,
1253 b'ui', b'interface.chunkselector', default=None,
1254 )
1254 )
1255 coreconfigitem(
1255 coreconfigitem(
1256 b'ui', b'large-file-limit', default=10000000,
1256 b'ui', b'large-file-limit', default=10000000,
1257 )
1257 )
1258 coreconfigitem(
1258 coreconfigitem(
1259 b'ui', b'logblockedtimes', default=False,
1259 b'ui', b'logblockedtimes', default=False,
1260 )
1260 )
1261 coreconfigitem(
1261 coreconfigitem(
1262 b'ui', b'logtemplate', default=None,
1262 b'ui', b'logtemplate', default=None,
1263 )
1263 )
1264 coreconfigitem(
1264 coreconfigitem(
1265 b'ui', b'merge', default=None,
1265 b'ui', b'merge', default=None,
1266 )
1266 )
1267 coreconfigitem(
1267 coreconfigitem(
1268 b'ui', b'mergemarkers', default=b'basic',
1268 b'ui', b'mergemarkers', default=b'basic',
1269 )
1269 )
1270 coreconfigitem(
1270 coreconfigitem(
1271 b'ui',
1271 b'ui',
1272 b'mergemarkertemplate',
1272 b'mergemarkertemplate',
1273 default=(
1273 default=(
1274 b'{node|short} '
1274 b'{node|short} '
1275 b'{ifeq(tags, "tip", "", '
1275 b'{ifeq(tags, "tip", "", '
1276 b'ifeq(tags, "", "", "{tags} "))}'
1276 b'ifeq(tags, "", "", "{tags} "))}'
1277 b'{if(bookmarks, "{bookmarks} ")}'
1277 b'{if(bookmarks, "{bookmarks} ")}'
1278 b'{ifeq(branch, "default", "", "{branch} ")}'
1278 b'{ifeq(branch, "default", "", "{branch} ")}'
1279 b'- {author|user}: {desc|firstline}'
1279 b'- {author|user}: {desc|firstline}'
1280 ),
1280 ),
1281 )
1281 )
1282 coreconfigitem(
1282 coreconfigitem(
1283 b'ui', b'message-output', default=b'stdio',
1283 b'ui', b'message-output', default=b'stdio',
1284 )
1284 )
1285 coreconfigitem(
1285 coreconfigitem(
1286 b'ui', b'nontty', default=False,
1286 b'ui', b'nontty', default=False,
1287 )
1287 )
1288 coreconfigitem(
1288 coreconfigitem(
1289 b'ui', b'origbackuppath', default=None,
1289 b'ui', b'origbackuppath', default=None,
1290 )
1290 )
1291 coreconfigitem(
1291 coreconfigitem(
1292 b'ui', b'paginate', default=True,
1292 b'ui', b'paginate', default=True,
1293 )
1293 )
1294 coreconfigitem(
1294 coreconfigitem(
1295 b'ui', b'patch', default=None,
1295 b'ui', b'patch', default=None,
1296 )
1296 )
1297 coreconfigitem(
1297 coreconfigitem(
1298 b'ui', b'pre-merge-tool-output-template', default=None,
1298 b'ui', b'pre-merge-tool-output-template', default=None,
1299 )
1299 )
1300 coreconfigitem(
1300 coreconfigitem(
1301 b'ui', b'portablefilenames', default=b'warn',
1301 b'ui', b'portablefilenames', default=b'warn',
1302 )
1302 )
1303 coreconfigitem(
1303 coreconfigitem(
1304 b'ui', b'promptecho', default=False,
1304 b'ui', b'promptecho', default=False,
1305 )
1305 )
1306 coreconfigitem(
1306 coreconfigitem(
1307 b'ui', b'quiet', default=False,
1307 b'ui', b'quiet', default=False,
1308 )
1308 )
1309 coreconfigitem(
1309 coreconfigitem(
1310 b'ui', b'quietbookmarkmove', default=False,
1310 b'ui', b'quietbookmarkmove', default=False,
1311 )
1311 )
1312 coreconfigitem(
1312 coreconfigitem(
1313 b'ui', b'relative-paths', default=b'legacy',
1313 b'ui', b'relative-paths', default=b'legacy',
1314 )
1314 )
1315 coreconfigitem(
1315 coreconfigitem(
1316 b'ui', b'remotecmd', default=b'hg',
1316 b'ui', b'remotecmd', default=b'hg',
1317 )
1317 )
1318 coreconfigitem(
1318 coreconfigitem(
1319 b'ui', b'report_untrusted', default=True,
1319 b'ui', b'report_untrusted', default=True,
1320 )
1320 )
1321 coreconfigitem(
1321 coreconfigitem(
1322 b'ui', b'rollback', default=True,
1322 b'ui', b'rollback', default=True,
1323 )
1323 )
1324 coreconfigitem(
1324 coreconfigitem(
1325 b'ui', b'signal-safe-lock', default=True,
1325 b'ui', b'signal-safe-lock', default=True,
1326 )
1326 )
1327 coreconfigitem(
1327 coreconfigitem(
1328 b'ui', b'slash', default=False,
1328 b'ui', b'slash', default=False,
1329 )
1329 )
1330 coreconfigitem(
1330 coreconfigitem(
1331 b'ui', b'ssh', default=b'ssh',
1331 b'ui', b'ssh', default=b'ssh',
1332 )
1332 )
1333 coreconfigitem(
1333 coreconfigitem(
1334 b'ui', b'ssherrorhint', default=None,
1334 b'ui', b'ssherrorhint', default=None,
1335 )
1335 )
1336 coreconfigitem(
1336 coreconfigitem(
1337 b'ui', b'statuscopies', default=False,
1337 b'ui', b'statuscopies', default=False,
1338 )
1338 )
1339 coreconfigitem(
1339 coreconfigitem(
1340 b'ui', b'strict', default=False,
1340 b'ui', b'strict', default=False,
1341 )
1341 )
1342 coreconfigitem(
1342 coreconfigitem(
1343 b'ui', b'style', default=b'',
1343 b'ui', b'style', default=b'',
1344 )
1344 )
1345 coreconfigitem(
1345 coreconfigitem(
1346 b'ui', b'supportcontact', default=None,
1346 b'ui', b'supportcontact', default=None,
1347 )
1347 )
1348 coreconfigitem(
1348 coreconfigitem(
1349 b'ui', b'textwidth', default=78,
1349 b'ui', b'textwidth', default=78,
1350 )
1350 )
1351 coreconfigitem(
1351 coreconfigitem(
1352 b'ui', b'timeout', default=b'600',
1352 b'ui', b'timeout', default=b'600',
1353 )
1353 )
1354 coreconfigitem(
1354 coreconfigitem(
1355 b'ui', b'timeout.warn', default=0,
1355 b'ui', b'timeout.warn', default=0,
1356 )
1356 )
1357 coreconfigitem(
1357 coreconfigitem(
1358 b'ui', b'traceback', default=False,
1358 b'ui', b'traceback', default=False,
1359 )
1359 )
1360 coreconfigitem(
1360 coreconfigitem(
1361 b'ui', b'tweakdefaults', default=False,
1361 b'ui', b'tweakdefaults', default=False,
1362 )
1362 )
1363 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
1363 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
1364 coreconfigitem(
1364 coreconfigitem(
1365 b'ui', b'verbose', default=False,
1365 b'ui', b'verbose', default=False,
1366 )
1366 )
1367 coreconfigitem(
1367 coreconfigitem(
1368 b'verify', b'skipflags', default=None,
1368 b'verify', b'skipflags', default=None,
1369 )
1369 )
1370 coreconfigitem(
1370 coreconfigitem(
1371 b'web', b'allowbz2', default=False,
1371 b'web', b'allowbz2', default=False,
1372 )
1372 )
1373 coreconfigitem(
1373 coreconfigitem(
1374 b'web', b'allowgz', default=False,
1374 b'web', b'allowgz', default=False,
1375 )
1375 )
1376 coreconfigitem(
1376 coreconfigitem(
1377 b'web', b'allow-pull', alias=[(b'web', b'allowpull')], default=True,
1377 b'web', b'allow-pull', alias=[(b'web', b'allowpull')], default=True,
1378 )
1378 )
1379 coreconfigitem(
1379 coreconfigitem(
1380 b'web', b'allow-push', alias=[(b'web', b'allow_push')], default=list,
1380 b'web', b'allow-push', alias=[(b'web', b'allow_push')], default=list,
1381 )
1381 )
1382 coreconfigitem(
1382 coreconfigitem(
1383 b'web', b'allowzip', default=False,
1383 b'web', b'allowzip', default=False,
1384 )
1384 )
1385 coreconfigitem(
1385 coreconfigitem(
1386 b'web', b'archivesubrepos', default=False,
1386 b'web', b'archivesubrepos', default=False,
1387 )
1387 )
1388 coreconfigitem(
1388 coreconfigitem(
1389 b'web', b'cache', default=True,
1389 b'web', b'cache', default=True,
1390 )
1390 )
1391 coreconfigitem(
1391 coreconfigitem(
1392 b'web', b'comparisoncontext', default=5,
1392 b'web', b'comparisoncontext', default=5,
1393 )
1393 )
1394 coreconfigitem(
1394 coreconfigitem(
1395 b'web', b'contact', default=None,
1395 b'web', b'contact', default=None,
1396 )
1396 )
1397 coreconfigitem(
1397 coreconfigitem(
1398 b'web', b'deny_push', default=list,
1398 b'web', b'deny_push', default=list,
1399 )
1399 )
1400 coreconfigitem(
1400 coreconfigitem(
1401 b'web', b'guessmime', default=False,
1401 b'web', b'guessmime', default=False,
1402 )
1402 )
1403 coreconfigitem(
1403 coreconfigitem(
1404 b'web', b'hidden', default=False,
1404 b'web', b'hidden', default=False,
1405 )
1405 )
1406 coreconfigitem(
1406 coreconfigitem(
1407 b'web', b'labels', default=list,
1407 b'web', b'labels', default=list,
1408 )
1408 )
1409 coreconfigitem(
1409 coreconfigitem(
1410 b'web', b'logoimg', default=b'hglogo.png',
1410 b'web', b'logoimg', default=b'hglogo.png',
1411 )
1411 )
1412 coreconfigitem(
1412 coreconfigitem(
1413 b'web', b'logourl', default=b'https://mercurial-scm.org/',
1413 b'web', b'logourl', default=b'https://mercurial-scm.org/',
1414 )
1414 )
1415 coreconfigitem(
1415 coreconfigitem(
1416 b'web', b'accesslog', default=b'-',
1416 b'web', b'accesslog', default=b'-',
1417 )
1417 )
1418 coreconfigitem(
1418 coreconfigitem(
1419 b'web', b'address', default=b'',
1419 b'web', b'address', default=b'',
1420 )
1420 )
1421 coreconfigitem(
1421 coreconfigitem(
1422 b'web', b'allow-archive', alias=[(b'web', b'allow_archive')], default=list,
1422 b'web', b'allow-archive', alias=[(b'web', b'allow_archive')], default=list,
1423 )
1423 )
1424 coreconfigitem(
1424 coreconfigitem(
1425 b'web', b'allow_read', default=list,
1425 b'web', b'allow_read', default=list,
1426 )
1426 )
1427 coreconfigitem(
1427 coreconfigitem(
1428 b'web', b'baseurl', default=None,
1428 b'web', b'baseurl', default=None,
1429 )
1429 )
1430 coreconfigitem(
1430 coreconfigitem(
1431 b'web', b'cacerts', default=None,
1431 b'web', b'cacerts', default=None,
1432 )
1432 )
1433 coreconfigitem(
1433 coreconfigitem(
1434 b'web', b'certificate', default=None,
1434 b'web', b'certificate', default=None,
1435 )
1435 )
1436 coreconfigitem(
1436 coreconfigitem(
1437 b'web', b'collapse', default=False,
1437 b'web', b'collapse', default=False,
1438 )
1438 )
1439 coreconfigitem(
1439 coreconfigitem(
1440 b'web', b'csp', default=None,
1440 b'web', b'csp', default=None,
1441 )
1441 )
1442 coreconfigitem(
1442 coreconfigitem(
1443 b'web', b'deny_read', default=list,
1443 b'web', b'deny_read', default=list,
1444 )
1444 )
1445 coreconfigitem(
1445 coreconfigitem(
1446 b'web', b'descend', default=True,
1446 b'web', b'descend', default=True,
1447 )
1447 )
1448 coreconfigitem(
1448 coreconfigitem(
1449 b'web', b'description', default=b"",
1449 b'web', b'description', default=b"",
1450 )
1450 )
1451 coreconfigitem(
1451 coreconfigitem(
1452 b'web', b'encoding', default=lambda: encoding.encoding,
1452 b'web', b'encoding', default=lambda: encoding.encoding,
1453 )
1453 )
1454 coreconfigitem(
1454 coreconfigitem(
1455 b'web', b'errorlog', default=b'-',
1455 b'web', b'errorlog', default=b'-',
1456 )
1456 )
1457 coreconfigitem(
1457 coreconfigitem(
1458 b'web', b'ipv6', default=False,
1458 b'web', b'ipv6', default=False,
1459 )
1459 )
1460 coreconfigitem(
1460 coreconfigitem(
1461 b'web', b'maxchanges', default=10,
1461 b'web', b'maxchanges', default=10,
1462 )
1462 )
1463 coreconfigitem(
1463 coreconfigitem(
1464 b'web', b'maxfiles', default=10,
1464 b'web', b'maxfiles', default=10,
1465 )
1465 )
1466 coreconfigitem(
1466 coreconfigitem(
1467 b'web', b'maxshortchanges', default=60,
1467 b'web', b'maxshortchanges', default=60,
1468 )
1468 )
1469 coreconfigitem(
1469 coreconfigitem(
1470 b'web', b'motd', default=b'',
1470 b'web', b'motd', default=b'',
1471 )
1471 )
1472 coreconfigitem(
1472 coreconfigitem(
1473 b'web', b'name', default=dynamicdefault,
1473 b'web', b'name', default=dynamicdefault,
1474 )
1474 )
1475 coreconfigitem(
1475 coreconfigitem(
1476 b'web', b'port', default=8000,
1476 b'web', b'port', default=8000,
1477 )
1477 )
1478 coreconfigitem(
1478 coreconfigitem(
1479 b'web', b'prefix', default=b'',
1479 b'web', b'prefix', default=b'',
1480 )
1480 )
1481 coreconfigitem(
1481 coreconfigitem(
1482 b'web', b'push_ssl', default=True,
1482 b'web', b'push_ssl', default=True,
1483 )
1483 )
1484 coreconfigitem(
1484 coreconfigitem(
1485 b'web', b'refreshinterval', default=20,
1485 b'web', b'refreshinterval', default=20,
1486 )
1486 )
1487 coreconfigitem(
1487 coreconfigitem(
1488 b'web', b'server-header', default=None,
1488 b'web', b'server-header', default=None,
1489 )
1489 )
1490 coreconfigitem(
1490 coreconfigitem(
1491 b'web', b'static', default=None,
1491 b'web', b'static', default=None,
1492 )
1492 )
1493 coreconfigitem(
1493 coreconfigitem(
1494 b'web', b'staticurl', default=None,
1494 b'web', b'staticurl', default=None,
1495 )
1495 )
1496 coreconfigitem(
1496 coreconfigitem(
1497 b'web', b'stripes', default=1,
1497 b'web', b'stripes', default=1,
1498 )
1498 )
1499 coreconfigitem(
1499 coreconfigitem(
1500 b'web', b'style', default=b'paper',
1500 b'web', b'style', default=b'paper',
1501 )
1501 )
1502 coreconfigitem(
1502 coreconfigitem(
1503 b'web', b'templates', default=None,
1503 b'web', b'templates', default=None,
1504 )
1504 )
1505 coreconfigitem(
1505 coreconfigitem(
1506 b'web', b'view', default=b'served', experimental=True,
1506 b'web', b'view', default=b'served', experimental=True,
1507 )
1507 )
1508 coreconfigitem(
1508 coreconfigitem(
1509 b'worker', b'backgroundclose', default=dynamicdefault,
1509 b'worker', b'backgroundclose', default=dynamicdefault,
1510 )
1510 )
1511 # Windows defaults to a limit of 512 open files. A buffer of 128
1511 # Windows defaults to a limit of 512 open files. A buffer of 128
1512 # should give us enough headway.
1512 # should give us enough headway.
1513 coreconfigitem(
1513 coreconfigitem(
1514 b'worker', b'backgroundclosemaxqueue', default=384,
1514 b'worker', b'backgroundclosemaxqueue', default=384,
1515 )
1515 )
1516 coreconfigitem(
1516 coreconfigitem(
1517 b'worker', b'backgroundcloseminfilecount', default=2048,
1517 b'worker', b'backgroundcloseminfilecount', default=2048,
1518 )
1518 )
1519 coreconfigitem(
1519 coreconfigitem(
1520 b'worker', b'backgroundclosethreadcount', default=4,
1520 b'worker', b'backgroundclosethreadcount', default=4,
1521 )
1521 )
1522 coreconfigitem(
1522 coreconfigitem(
1523 b'worker', b'enabled', default=True,
1523 b'worker', b'enabled', default=True,
1524 )
1524 )
1525 coreconfigitem(
1525 coreconfigitem(
1526 b'worker', b'numcpus', default=None,
1526 b'worker', b'numcpus', default=None,
1527 )
1527 )
1528
1528
1529 # Rebase related configuration moved to core because other extension are doing
1529 # Rebase related configuration moved to core because other extension are doing
1530 # strange things. For example, shelve import the extensions to reuse some bit
1530 # strange things. For example, shelve import the extensions to reuse some bit
1531 # without formally loading it.
1531 # without formally loading it.
1532 coreconfigitem(
1532 coreconfigitem(
1533 b'commands', b'rebase.requiredest', default=False,
1533 b'commands', b'rebase.requiredest', default=False,
1534 )
1534 )
1535 coreconfigitem(
1535 coreconfigitem(
1536 b'experimental', b'rebaseskipobsolete', default=True,
1536 b'experimental', b'rebaseskipobsolete', default=True,
1537 )
1537 )
1538 coreconfigitem(
1538 coreconfigitem(
1539 b'rebase', b'singletransaction', default=False,
1539 b'rebase', b'singletransaction', default=False,
1540 )
1540 )
1541 coreconfigitem(
1541 coreconfigitem(
1542 b'rebase', b'experimental.inmemory', default=False,
1542 b'rebase', b'experimental.inmemory', default=False,
1543 )
1543 )
@@ -1,3716 +1,3716 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 delattr,
27 delattr,
28 getattr,
28 getattr,
29 )
29 )
30 from . import (
30 from . import (
31 bookmarks,
31 bookmarks,
32 branchmap,
32 branchmap,
33 bundle2,
33 bundle2,
34 changegroup,
34 changegroup,
35 color,
35 color,
36 context,
36 context,
37 dirstate,
37 dirstate,
38 dirstateguard,
38 dirstateguard,
39 discovery,
39 discovery,
40 encoding,
40 encoding,
41 error,
41 error,
42 exchange,
42 exchange,
43 extensions,
43 extensions,
44 filelog,
44 filelog,
45 hook,
45 hook,
46 lock as lockmod,
46 lock as lockmod,
47 match as matchmod,
47 match as matchmod,
48 merge as mergemod,
48 merge as mergemod,
49 mergeutil,
49 mergeutil,
50 namespaces,
50 namespaces,
51 narrowspec,
51 narrowspec,
52 obsolete,
52 obsolete,
53 pathutil,
53 pathutil,
54 phases,
54 phases,
55 pushkey,
55 pushkey,
56 pycompat,
56 pycompat,
57 repoview,
57 repoview,
58 revset,
58 revset,
59 revsetlang,
59 revsetlang,
60 scmutil,
60 scmutil,
61 sparse,
61 sparse,
62 store as storemod,
62 store as storemod,
63 subrepoutil,
63 subrepoutil,
64 tags as tagsmod,
64 tags as tagsmod,
65 transaction,
65 transaction,
66 txnutil,
66 txnutil,
67 util,
67 util,
68 vfs as vfsmod,
68 vfs as vfsmod,
69 )
69 )
70
70
71 from .interfaces import (
71 from .interfaces import (
72 repository,
72 repository,
73 util as interfaceutil,
73 util as interfaceutil,
74 )
74 )
75
75
76 from .utils import (
76 from .utils import (
77 procutil,
77 procutil,
78 stringutil,
78 stringutil,
79 )
79 )
80
80
81 from .revlogutils import constants as revlogconst
81 from .revlogutils import constants as revlogconst
82
82
83 release = lockmod.release
83 release = lockmod.release
84 urlerr = util.urlerr
84 urlerr = util.urlerr
85 urlreq = util.urlreq
85 urlreq = util.urlreq
86
86
87 # set of (path, vfs-location) tuples. vfs-location is:
87 # set of (path, vfs-location) tuples. vfs-location is:
88 # - 'plain for vfs relative paths
88 # - 'plain for vfs relative paths
89 # - '' for svfs relative paths
89 # - '' for svfs relative paths
90 _cachedfiles = set()
90 _cachedfiles = set()
91
91
92
92
93 class _basefilecache(scmutil.filecache):
93 class _basefilecache(scmutil.filecache):
94 """All filecache usage on repo are done for logic that should be unfiltered
94 """All filecache usage on repo are done for logic that should be unfiltered
95 """
95 """
96
96
97 def __get__(self, repo, type=None):
97 def __get__(self, repo, type=None):
98 if repo is None:
98 if repo is None:
99 return self
99 return self
100 # proxy to unfiltered __dict__ since filtered repo has no entry
100 # proxy to unfiltered __dict__ since filtered repo has no entry
101 unfi = repo.unfiltered()
101 unfi = repo.unfiltered()
102 try:
102 try:
103 return unfi.__dict__[self.sname]
103 return unfi.__dict__[self.sname]
104 except KeyError:
104 except KeyError:
105 pass
105 pass
106 return super(_basefilecache, self).__get__(unfi, type)
106 return super(_basefilecache, self).__get__(unfi, type)
107
107
108 def set(self, repo, value):
108 def set(self, repo, value):
109 return super(_basefilecache, self).set(repo.unfiltered(), value)
109 return super(_basefilecache, self).set(repo.unfiltered(), value)
110
110
111
111
112 class repofilecache(_basefilecache):
112 class repofilecache(_basefilecache):
113 """filecache for files in .hg but outside of .hg/store"""
113 """filecache for files in .hg but outside of .hg/store"""
114
114
115 def __init__(self, *paths):
115 def __init__(self, *paths):
116 super(repofilecache, self).__init__(*paths)
116 super(repofilecache, self).__init__(*paths)
117 for path in paths:
117 for path in paths:
118 _cachedfiles.add((path, b'plain'))
118 _cachedfiles.add((path, b'plain'))
119
119
120 def join(self, obj, fname):
120 def join(self, obj, fname):
121 return obj.vfs.join(fname)
121 return obj.vfs.join(fname)
122
122
123
123
124 class storecache(_basefilecache):
124 class storecache(_basefilecache):
125 """filecache for files in the store"""
125 """filecache for files in the store"""
126
126
127 def __init__(self, *paths):
127 def __init__(self, *paths):
128 super(storecache, self).__init__(*paths)
128 super(storecache, self).__init__(*paths)
129 for path in paths:
129 for path in paths:
130 _cachedfiles.add((path, b''))
130 _cachedfiles.add((path, b''))
131
131
132 def join(self, obj, fname):
132 def join(self, obj, fname):
133 return obj.sjoin(fname)
133 return obj.sjoin(fname)
134
134
135
135
136 class mixedrepostorecache(_basefilecache):
136 class mixedrepostorecache(_basefilecache):
137 """filecache for a mix files in .hg/store and outside"""
137 """filecache for a mix files in .hg/store and outside"""
138
138
139 def __init__(self, *pathsandlocations):
139 def __init__(self, *pathsandlocations):
140 # scmutil.filecache only uses the path for passing back into our
140 # scmutil.filecache only uses the path for passing back into our
141 # join(), so we can safely pass a list of paths and locations
141 # join(), so we can safely pass a list of paths and locations
142 super(mixedrepostorecache, self).__init__(*pathsandlocations)
142 super(mixedrepostorecache, self).__init__(*pathsandlocations)
143 _cachedfiles.update(pathsandlocations)
143 _cachedfiles.update(pathsandlocations)
144
144
145 def join(self, obj, fnameandlocation):
145 def join(self, obj, fnameandlocation):
146 fname, location = fnameandlocation
146 fname, location = fnameandlocation
147 if location == b'plain':
147 if location == b'plain':
148 return obj.vfs.join(fname)
148 return obj.vfs.join(fname)
149 else:
149 else:
150 if location != b'':
150 if location != b'':
151 raise error.ProgrammingError(
151 raise error.ProgrammingError(
152 b'unexpected location: %s' % location
152 b'unexpected location: %s' % location
153 )
153 )
154 return obj.sjoin(fname)
154 return obj.sjoin(fname)
155
155
156
156
157 def isfilecached(repo, name):
157 def isfilecached(repo, name):
158 """check if a repo has already cached "name" filecache-ed property
158 """check if a repo has already cached "name" filecache-ed property
159
159
160 This returns (cachedobj-or-None, iscached) tuple.
160 This returns (cachedobj-or-None, iscached) tuple.
161 """
161 """
162 cacheentry = repo.unfiltered()._filecache.get(name, None)
162 cacheentry = repo.unfiltered()._filecache.get(name, None)
163 if not cacheentry:
163 if not cacheentry:
164 return None, False
164 return None, False
165 return cacheentry.obj, True
165 return cacheentry.obj, True
166
166
167
167
168 class unfilteredpropertycache(util.propertycache):
168 class unfilteredpropertycache(util.propertycache):
169 """propertycache that apply to unfiltered repo only"""
169 """propertycache that apply to unfiltered repo only"""
170
170
171 def __get__(self, repo, type=None):
171 def __get__(self, repo, type=None):
172 unfi = repo.unfiltered()
172 unfi = repo.unfiltered()
173 if unfi is repo:
173 if unfi is repo:
174 return super(unfilteredpropertycache, self).__get__(unfi)
174 return super(unfilteredpropertycache, self).__get__(unfi)
175 return getattr(unfi, self.name)
175 return getattr(unfi, self.name)
176
176
177
177
178 class filteredpropertycache(util.propertycache):
178 class filteredpropertycache(util.propertycache):
179 """propertycache that must take filtering in account"""
179 """propertycache that must take filtering in account"""
180
180
181 def cachevalue(self, obj, value):
181 def cachevalue(self, obj, value):
182 object.__setattr__(obj, self.name, value)
182 object.__setattr__(obj, self.name, value)
183
183
184
184
185 def hasunfilteredcache(repo, name):
185 def hasunfilteredcache(repo, name):
186 """check if a repo has an unfilteredpropertycache value for <name>"""
186 """check if a repo has an unfilteredpropertycache value for <name>"""
187 return name in vars(repo.unfiltered())
187 return name in vars(repo.unfiltered())
188
188
189
189
190 def unfilteredmethod(orig):
190 def unfilteredmethod(orig):
191 """decorate method that always need to be run on unfiltered version"""
191 """decorate method that always need to be run on unfiltered version"""
192
192
193 def wrapper(repo, *args, **kwargs):
193 def wrapper(repo, *args, **kwargs):
194 return orig(repo.unfiltered(), *args, **kwargs)
194 return orig(repo.unfiltered(), *args, **kwargs)
195
195
196 return wrapper
196 return wrapper
197
197
198
198
199 moderncaps = {
199 moderncaps = {
200 b'lookup',
200 b'lookup',
201 b'branchmap',
201 b'branchmap',
202 b'pushkey',
202 b'pushkey',
203 b'known',
203 b'known',
204 b'getbundle',
204 b'getbundle',
205 b'unbundle',
205 b'unbundle',
206 }
206 }
207 legacycaps = moderncaps.union({b'changegroupsubset'})
207 legacycaps = moderncaps.union({b'changegroupsubset'})
208
208
209
209
210 @interfaceutil.implementer(repository.ipeercommandexecutor)
210 @interfaceutil.implementer(repository.ipeercommandexecutor)
211 class localcommandexecutor(object):
211 class localcommandexecutor(object):
212 def __init__(self, peer):
212 def __init__(self, peer):
213 self._peer = peer
213 self._peer = peer
214 self._sent = False
214 self._sent = False
215 self._closed = False
215 self._closed = False
216
216
217 def __enter__(self):
217 def __enter__(self):
218 return self
218 return self
219
219
220 def __exit__(self, exctype, excvalue, exctb):
220 def __exit__(self, exctype, excvalue, exctb):
221 self.close()
221 self.close()
222
222
223 def callcommand(self, command, args):
223 def callcommand(self, command, args):
224 if self._sent:
224 if self._sent:
225 raise error.ProgrammingError(
225 raise error.ProgrammingError(
226 b'callcommand() cannot be used after sendcommands()'
226 b'callcommand() cannot be used after sendcommands()'
227 )
227 )
228
228
229 if self._closed:
229 if self._closed:
230 raise error.ProgrammingError(
230 raise error.ProgrammingError(
231 b'callcommand() cannot be used after close()'
231 b'callcommand() cannot be used after close()'
232 )
232 )
233
233
234 # We don't need to support anything fancy. Just call the named
234 # We don't need to support anything fancy. Just call the named
235 # method on the peer and return a resolved future.
235 # method on the peer and return a resolved future.
236 fn = getattr(self._peer, pycompat.sysstr(command))
236 fn = getattr(self._peer, pycompat.sysstr(command))
237
237
238 f = pycompat.futures.Future()
238 f = pycompat.futures.Future()
239
239
240 try:
240 try:
241 result = fn(**pycompat.strkwargs(args))
241 result = fn(**pycompat.strkwargs(args))
242 except Exception:
242 except Exception:
243 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
243 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
244 else:
244 else:
245 f.set_result(result)
245 f.set_result(result)
246
246
247 return f
247 return f
248
248
249 def sendcommands(self):
249 def sendcommands(self):
250 self._sent = True
250 self._sent = True
251
251
252 def close(self):
252 def close(self):
253 self._closed = True
253 self._closed = True
254
254
255
255
256 @interfaceutil.implementer(repository.ipeercommands)
256 @interfaceutil.implementer(repository.ipeercommands)
257 class localpeer(repository.peer):
257 class localpeer(repository.peer):
258 '''peer for a local repo; reflects only the most recent API'''
258 '''peer for a local repo; reflects only the most recent API'''
259
259
260 def __init__(self, repo, caps=None):
260 def __init__(self, repo, caps=None):
261 super(localpeer, self).__init__()
261 super(localpeer, self).__init__()
262
262
263 if caps is None:
263 if caps is None:
264 caps = moderncaps.copy()
264 caps = moderncaps.copy()
265 self._repo = repo.filtered(b'served')
265 self._repo = repo.filtered(b'served')
266 self.ui = repo.ui
266 self.ui = repo.ui
267 self._caps = repo._restrictcapabilities(caps)
267 self._caps = repo._restrictcapabilities(caps)
268
268
269 # Begin of _basepeer interface.
269 # Begin of _basepeer interface.
270
270
271 def url(self):
271 def url(self):
272 return self._repo.url()
272 return self._repo.url()
273
273
274 def local(self):
274 def local(self):
275 return self._repo
275 return self._repo
276
276
277 def peer(self):
277 def peer(self):
278 return self
278 return self
279
279
280 def canpush(self):
280 def canpush(self):
281 return True
281 return True
282
282
283 def close(self):
283 def close(self):
284 self._repo.close()
284 self._repo.close()
285
285
286 # End of _basepeer interface.
286 # End of _basepeer interface.
287
287
288 # Begin of _basewirecommands interface.
288 # Begin of _basewirecommands interface.
289
289
290 def branchmap(self):
290 def branchmap(self):
291 return self._repo.branchmap()
291 return self._repo.branchmap()
292
292
293 def capabilities(self):
293 def capabilities(self):
294 return self._caps
294 return self._caps
295
295
296 def clonebundles(self):
296 def clonebundles(self):
297 return self._repo.tryread(b'clonebundles.manifest')
297 return self._repo.tryread(b'clonebundles.manifest')
298
298
299 def debugwireargs(self, one, two, three=None, four=None, five=None):
299 def debugwireargs(self, one, two, three=None, four=None, five=None):
300 """Used to test argument passing over the wire"""
300 """Used to test argument passing over the wire"""
301 return b"%s %s %s %s %s" % (
301 return b"%s %s %s %s %s" % (
302 one,
302 one,
303 two,
303 two,
304 pycompat.bytestr(three),
304 pycompat.bytestr(three),
305 pycompat.bytestr(four),
305 pycompat.bytestr(four),
306 pycompat.bytestr(five),
306 pycompat.bytestr(five),
307 )
307 )
308
308
309 def getbundle(
309 def getbundle(
310 self, source, heads=None, common=None, bundlecaps=None, **kwargs
310 self, source, heads=None, common=None, bundlecaps=None, **kwargs
311 ):
311 ):
312 chunks = exchange.getbundlechunks(
312 chunks = exchange.getbundlechunks(
313 self._repo,
313 self._repo,
314 source,
314 source,
315 heads=heads,
315 heads=heads,
316 common=common,
316 common=common,
317 bundlecaps=bundlecaps,
317 bundlecaps=bundlecaps,
318 **kwargs
318 **kwargs
319 )[1]
319 )[1]
320 cb = util.chunkbuffer(chunks)
320 cb = util.chunkbuffer(chunks)
321
321
322 if exchange.bundle2requested(bundlecaps):
322 if exchange.bundle2requested(bundlecaps):
323 # When requesting a bundle2, getbundle returns a stream to make the
323 # When requesting a bundle2, getbundle returns a stream to make the
324 # wire level function happier. We need to build a proper object
324 # wire level function happier. We need to build a proper object
325 # from it in local peer.
325 # from it in local peer.
326 return bundle2.getunbundler(self.ui, cb)
326 return bundle2.getunbundler(self.ui, cb)
327 else:
327 else:
328 return changegroup.getunbundler(b'01', cb, None)
328 return changegroup.getunbundler(b'01', cb, None)
329
329
330 def heads(self):
330 def heads(self):
331 return self._repo.heads()
331 return self._repo.heads()
332
332
333 def known(self, nodes):
333 def known(self, nodes):
334 return self._repo.known(nodes)
334 return self._repo.known(nodes)
335
335
336 def listkeys(self, namespace):
336 def listkeys(self, namespace):
337 return self._repo.listkeys(namespace)
337 return self._repo.listkeys(namespace)
338
338
339 def lookup(self, key):
339 def lookup(self, key):
340 return self._repo.lookup(key)
340 return self._repo.lookup(key)
341
341
342 def pushkey(self, namespace, key, old, new):
342 def pushkey(self, namespace, key, old, new):
343 return self._repo.pushkey(namespace, key, old, new)
343 return self._repo.pushkey(namespace, key, old, new)
344
344
345 def stream_out(self):
345 def stream_out(self):
346 raise error.Abort(_(b'cannot perform stream clone against local peer'))
346 raise error.Abort(_(b'cannot perform stream clone against local peer'))
347
347
348 def unbundle(self, bundle, heads, url):
348 def unbundle(self, bundle, heads, url):
349 """apply a bundle on a repo
349 """apply a bundle on a repo
350
350
351 This function handles the repo locking itself."""
351 This function handles the repo locking itself."""
352 try:
352 try:
353 try:
353 try:
354 bundle = exchange.readbundle(self.ui, bundle, None)
354 bundle = exchange.readbundle(self.ui, bundle, None)
355 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
355 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
356 if util.safehasattr(ret, b'getchunks'):
356 if util.safehasattr(ret, b'getchunks'):
357 # This is a bundle20 object, turn it into an unbundler.
357 # This is a bundle20 object, turn it into an unbundler.
358 # This little dance should be dropped eventually when the
358 # This little dance should be dropped eventually when the
359 # API is finally improved.
359 # API is finally improved.
360 stream = util.chunkbuffer(ret.getchunks())
360 stream = util.chunkbuffer(ret.getchunks())
361 ret = bundle2.getunbundler(self.ui, stream)
361 ret = bundle2.getunbundler(self.ui, stream)
362 return ret
362 return ret
363 except Exception as exc:
363 except Exception as exc:
364 # If the exception contains output salvaged from a bundle2
364 # If the exception contains output salvaged from a bundle2
365 # reply, we need to make sure it is printed before continuing
365 # reply, we need to make sure it is printed before continuing
366 # to fail. So we build a bundle2 with such output and consume
366 # to fail. So we build a bundle2 with such output and consume
367 # it directly.
367 # it directly.
368 #
368 #
369 # This is not very elegant but allows a "simple" solution for
369 # This is not very elegant but allows a "simple" solution for
370 # issue4594
370 # issue4594
371 output = getattr(exc, '_bundle2salvagedoutput', ())
371 output = getattr(exc, '_bundle2salvagedoutput', ())
372 if output:
372 if output:
373 bundler = bundle2.bundle20(self._repo.ui)
373 bundler = bundle2.bundle20(self._repo.ui)
374 for out in output:
374 for out in output:
375 bundler.addpart(out)
375 bundler.addpart(out)
376 stream = util.chunkbuffer(bundler.getchunks())
376 stream = util.chunkbuffer(bundler.getchunks())
377 b = bundle2.getunbundler(self.ui, stream)
377 b = bundle2.getunbundler(self.ui, stream)
378 bundle2.processbundle(self._repo, b)
378 bundle2.processbundle(self._repo, b)
379 raise
379 raise
380 except error.PushRaced as exc:
380 except error.PushRaced as exc:
381 raise error.ResponseError(
381 raise error.ResponseError(
382 _(b'push failed:'), stringutil.forcebytestr(exc)
382 _(b'push failed:'), stringutil.forcebytestr(exc)
383 )
383 )
384
384
385 # End of _basewirecommands interface.
385 # End of _basewirecommands interface.
386
386
387 # Begin of peer interface.
387 # Begin of peer interface.
388
388
389 def commandexecutor(self):
389 def commandexecutor(self):
390 return localcommandexecutor(self)
390 return localcommandexecutor(self)
391
391
392 # End of peer interface.
392 # End of peer interface.
393
393
394
394
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        """Delegate the legacy ``between`` command to the wrapped repo."""
        return self._repo.between(pairs)

    def branches(self, nodes):
        """Delegate the legacy ``branches`` command to the wrapped repo."""
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        """Build a '01' changegroup from ``nodes`` up to the repo heads."""
        out = discovery.outgoing(
            self._repo, missingroots=nodes, missingheads=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, out, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        """Build a '01' changegroup covering ``bases`` up to ``heads``."""
        out = discovery.outgoing(
            self._repo, missingroots=bases, missingheads=heads
        )
        return changegroup.makechangegroup(self._repo, out, b'01', source)

    # End of baselegacywirecommands interface.
424
424
425
425
# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = b'sparserevlog'

# A repository with the sidedataflag requirement will allow to store extra
# information for revision without altering their original hashes.
SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'

# A repository with the copies-sidedata-changeset requirement will store
# copies related information in changeset's sidedata.
COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle that requirements.
featuresetupfuncs = set()
455
455
456
456
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # will not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(
                _(b'.hg/sharedpath points to nonexistent directory %s')
                % sharedvfs.base
            )

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produces types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )
665
665
666
666
def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
    except IOError:
        # A missing/unreadable hgrc simply means no extra config was loaded.
        return False
    else:
        return True
684
684
685
685
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement in requirements:
            for name in names:
                # Only auto-enable when the user has no explicit setting.
                if not ui.hasconfig(b'extensions', name):
                    ui.setconfig(b'extensions', name, b'', source=b'autoload')
710
710
711
711
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    loadedmodules = {m.__name__ for n, m in extensions.extensions(ui)}
    for fn in featuresetupfuncs:
        if fn.__module__ in loadedmodules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported
734
734
735
735
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement not in supported:
            # An entry that does not even start with an alphanumeric byte
            # indicates a mangled requires file, not merely an unknown
            # feature.
            if not requirement or not requirement[0:1].isalnum():
                raise error.RequirementError(
                    _(b'.hg/requires file is corrupt')
                )
            missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
765
765
766
766
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    # The sparse extension must be loaded to open a repo that uses the
    # experimental sparse requirement.
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extensions to access'
            )
        )
788
788
789
789
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository.

    The concrete store class is picked from the repository's requirements:
    ``fncache`` (with optional ``dotencode``) is the modern layout,
    ``store`` without ``fncache`` is the legacy encoded layout, and the
    absence of ``store`` means the very old flat layout.
    """
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(
                path, vfstype, b'dotencode' in requirements
            )

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)
801
801
802
802
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options
837
837
838
838
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs.

    Combines repository requirements and ui configuration into the dict of
    options consumed by the revlog storage layer.
    """

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        # no explicit config: derive the default from the generaldelta
        # configuration
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        # sparse-revlog implies generaldelta
        options[b'generaldelta'] = True

    sidedata = SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options
931
931
932
932
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository
936
936
937
937
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        """Return the filelog for ``path``, stripping a leading slash.

        Uses ``startswith`` rather than ``path[0] == b'/'`` because on
        Python 3 indexing bytes yields an int, so that comparison would
        always be False and the slash would never be stripped.
        """
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)
947
947
948
948
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        """Return the narrow filelog for ``path``, stripping a leading slash.

        Uses ``startswith`` rather than ``path[0] == b'/'`` because on
        Python 3 indexing bytes yields an int, so that comparison would
        always be False and the slash would never be stripped.
        """
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
958
958
959
959
def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``.

    Also records the storage-related repo features implied by the chosen
    backend by mutating ``features`` in place.
    """
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage
969
969
970
970
# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
980
980
981
981
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        b'treemanifest',
        COPIESSDC_REQUIREMENT,
        REVLOGV2_REQUIREMENT,
        SIDEDATA_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        b'shared',
        b'relshared',
        b'dotencode',
        b'exp-sparse',
        b'internal-phase',
    }

    # list of prefixes for files which can be written without 'wlock'.
    # Extensions should extend this list when needed.
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }
1042
1042
1043 def __init__(
1043 def __init__(
1044 self,
1044 self,
1045 baseui,
1045 baseui,
1046 ui,
1046 ui,
1047 origroot,
1047 origroot,
1048 wdirvfs,
1048 wdirvfs,
1049 hgvfs,
1049 hgvfs,
1050 requirements,
1050 requirements,
1051 supportedrequirements,
1051 supportedrequirements,
1052 sharedpath,
1052 sharedpath,
1053 store,
1053 store,
1054 cachevfs,
1054 cachevfs,
1055 wcachevfs,
1055 wcachevfs,
1056 features,
1056 features,
1057 intents=None,
1057 intents=None,
1058 ):
1058 ):
1059 """Create a new local repository instance.
1059 """Create a new local repository instance.
1060
1060
1061 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1061 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1062 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1062 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1063 object.
1063 object.
1064
1064
1065 Arguments:
1065 Arguments:
1066
1066
1067 baseui
1067 baseui
1068 ``ui.ui`` instance that ``ui`` argument was based off of.
1068 ``ui.ui`` instance that ``ui`` argument was based off of.
1069
1069
1070 ui
1070 ui
1071 ``ui.ui`` instance for use by the repository.
1071 ``ui.ui`` instance for use by the repository.
1072
1072
1073 origroot
1073 origroot
1074 ``bytes`` path to working directory root of this repository.
1074 ``bytes`` path to working directory root of this repository.
1075
1075
1076 wdirvfs
1076 wdirvfs
1077 ``vfs.vfs`` rooted at the working directory.
1077 ``vfs.vfs`` rooted at the working directory.
1078
1078
1079 hgvfs
1079 hgvfs
1080 ``vfs.vfs`` rooted at .hg/
1080 ``vfs.vfs`` rooted at .hg/
1081
1081
1082 requirements
1082 requirements
1083 ``set`` of bytestrings representing repository opening requirements.
1083 ``set`` of bytestrings representing repository opening requirements.
1084
1084
1085 supportedrequirements
1085 supportedrequirements
1086 ``set`` of bytestrings representing repository requirements that we
1086 ``set`` of bytestrings representing repository requirements that we
1087 know how to open. May be a supetset of ``requirements``.
1087 know how to open. May be a supetset of ``requirements``.
1088
1088
1089 sharedpath
1089 sharedpath
1090 ``bytes`` Defining path to storage base directory. Points to a
1090 ``bytes`` Defining path to storage base directory. Points to a
1091 ``.hg/`` directory somewhere.
1091 ``.hg/`` directory somewhere.
1092
1092
1093 store
1093 store
1094 ``store.basicstore`` (or derived) instance providing access to
1094 ``store.basicstore`` (or derived) instance providing access to
1095 versioned storage.
1095 versioned storage.
1096
1096
1097 cachevfs
1097 cachevfs
1098 ``vfs.vfs`` used for cache files.
1098 ``vfs.vfs`` used for cache files.
1099
1099
1100 wcachevfs
1100 wcachevfs
1101 ``vfs.vfs`` used for cache files related to the working copy.
1101 ``vfs.vfs`` used for cache files related to the working copy.
1102
1102
1103 features
1103 features
1104 ``set`` of bytestrings defining features/capabilities of this
1104 ``set`` of bytestrings defining features/capabilities of this
1105 instance.
1105 instance.
1106
1106
1107 intents
1107 intents
1108 ``set`` of system strings indicating what this repo will be used
1108 ``set`` of system strings indicating what this repo will be used
1109 for.
1109 for.
1110 """
1110 """
1111 self.baseui = baseui
1111 self.baseui = baseui
1112 self.ui = ui
1112 self.ui = ui
1113 self.origroot = origroot
1113 self.origroot = origroot
1114 # vfs rooted at working directory.
1114 # vfs rooted at working directory.
1115 self.wvfs = wdirvfs
1115 self.wvfs = wdirvfs
1116 self.root = wdirvfs.base
1116 self.root = wdirvfs.base
1117 # vfs rooted at .hg/. Used to access most non-store paths.
1117 # vfs rooted at .hg/. Used to access most non-store paths.
1118 self.vfs = hgvfs
1118 self.vfs = hgvfs
1119 self.path = hgvfs.base
1119 self.path = hgvfs.base
1120 self.requirements = requirements
1120 self.requirements = requirements
1121 self.supported = supportedrequirements
1121 self.supported = supportedrequirements
1122 self.sharedpath = sharedpath
1122 self.sharedpath = sharedpath
1123 self.store = store
1123 self.store = store
1124 self.cachevfs = cachevfs
1124 self.cachevfs = cachevfs
1125 self.wcachevfs = wcachevfs
1125 self.wcachevfs = wcachevfs
1126 self.features = features
1126 self.features = features
1127
1127
1128 self.filtername = None
1128 self.filtername = None
1129
1129
1130 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1130 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1131 b'devel', b'check-locks'
1131 b'devel', b'check-locks'
1132 ):
1132 ):
1133 self.vfs.audit = self._getvfsward(self.vfs.audit)
1133 self.vfs.audit = self._getvfsward(self.vfs.audit)
1134 # A list of callback to shape the phase if no data were found.
1134 # A list of callback to shape the phase if no data were found.
1135 # Callback are in the form: func(repo, roots) --> processed root.
1135 # Callback are in the form: func(repo, roots) --> processed root.
1136 # This list it to be filled by extension during repo setup
1136 # This list it to be filled by extension during repo setup
1137 self._phasedefaults = []
1137 self._phasedefaults = []
1138
1138
1139 color.setup(self.ui)
1139 color.setup(self.ui)
1140
1140
1141 self.spath = self.store.path
1141 self.spath = self.store.path
1142 self.svfs = self.store.vfs
1142 self.svfs = self.store.vfs
1143 self.sjoin = self.store.join
1143 self.sjoin = self.store.join
1144 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1144 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1145 b'devel', b'check-locks'
1145 b'devel', b'check-locks'
1146 ):
1146 ):
1147 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1147 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1148 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1148 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1149 else: # standard vfs
1149 else: # standard vfs
1150 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1150 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1151
1151
1152 self._dirstatevalidatewarned = False
1152 self._dirstatevalidatewarned = False
1153
1153
1154 self._branchcaches = branchmap.BranchMapCache()
1154 self._branchcaches = branchmap.BranchMapCache()
1155 self._revbranchcache = None
1155 self._revbranchcache = None
1156 self._filterpats = {}
1156 self._filterpats = {}
1157 self._datafilters = {}
1157 self._datafilters = {}
1158 self._transref = self._lockref = self._wlockref = None
1158 self._transref = self._lockref = self._wlockref = None
1159
1159
1160 # A cache for various files under .hg/ that tracks file changes,
1160 # A cache for various files under .hg/ that tracks file changes,
1161 # (used by the filecache decorator)
1161 # (used by the filecache decorator)
1162 #
1162 #
1163 # Maps a property name to its util.filecacheentry
1163 # Maps a property name to its util.filecacheentry
1164 self._filecache = {}
1164 self._filecache = {}
1165
1165
1166 # hold sets of revision to be filtered
1166 # hold sets of revision to be filtered
1167 # should be cleared when something might have changed the filter value:
1167 # should be cleared when something might have changed the filter value:
1168 # - new changesets,
1168 # - new changesets,
1169 # - phase change,
1169 # - phase change,
1170 # - new obsolescence marker,
1170 # - new obsolescence marker,
1171 # - working directory parent change,
1171 # - working directory parent change,
1172 # - bookmark changes
1172 # - bookmark changes
1173 self.filteredrevcache = {}
1173 self.filteredrevcache = {}
1174
1174
1175 # post-dirstate-status hooks
1175 # post-dirstate-status hooks
1176 self._postdsstatus = []
1176 self._postdsstatus = []
1177
1177
1178 # generic mapping between names and nodes
1178 # generic mapping between names and nodes
1179 self.names = namespaces.namespaces()
1179 self.names = namespaces.namespaces()
1180
1180
1181 # Key to signature value.
1181 # Key to signature value.
1182 self._sparsesignaturecache = {}
1182 self._sparsesignaturecache = {}
1183 # Signature to cached matcher instance.
1183 # Signature to cached matcher instance.
1184 self._sparsematchercache = {}
1184 self._sparsematchercache = {}
1185
1185
1186 self._extrafilterid = repoview.extrafilter(ui)
1186 self._extrafilterid = repoview.extrafilter(ui)
1187
1187
1188 self.filecopiesmode = None
1188 self.filecopiesmode = None
1189 if COPIESSDC_REQUIREMENT in self.requirements:
1189 if COPIESSDC_REQUIREMENT in self.requirements:
1190 self.filecopiesmode = b'changeset-sidedata'
1190 self.filecopiesmode = b'changeset-sidedata'
1191
1191
1192 def _getvfsward(self, origfunc):
1192 def _getvfsward(self, origfunc):
1193 """build a ward for self.vfs"""
1193 """build a ward for self.vfs"""
1194 rref = weakref.ref(self)
1194 rref = weakref.ref(self)
1195
1195
1196 def checkvfs(path, mode=None):
1196 def checkvfs(path, mode=None):
1197 ret = origfunc(path, mode=mode)
1197 ret = origfunc(path, mode=mode)
1198 repo = rref()
1198 repo = rref()
1199 if (
1199 if (
1200 repo is None
1200 repo is None
1201 or not util.safehasattr(repo, b'_wlockref')
1201 or not util.safehasattr(repo, b'_wlockref')
1202 or not util.safehasattr(repo, b'_lockref')
1202 or not util.safehasattr(repo, b'_lockref')
1203 ):
1203 ):
1204 return
1204 return
1205 if mode in (None, b'r', b'rb'):
1205 if mode in (None, b'r', b'rb'):
1206 return
1206 return
1207 if path.startswith(repo.path):
1207 if path.startswith(repo.path):
1208 # truncate name relative to the repository (.hg)
1208 # truncate name relative to the repository (.hg)
1209 path = path[len(repo.path) + 1 :]
1209 path = path[len(repo.path) + 1 :]
1210 if path.startswith(b'cache/'):
1210 if path.startswith(b'cache/'):
1211 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1211 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1212 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1212 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1213 if path.startswith(b'journal.') or path.startswith(b'undo.'):
1213 if path.startswith(b'journal.') or path.startswith(b'undo.'):
1214 # journal is covered by 'lock'
1214 # journal is covered by 'lock'
1215 if repo._currentlock(repo._lockref) is None:
1215 if repo._currentlock(repo._lockref) is None:
1216 repo.ui.develwarn(
1216 repo.ui.develwarn(
1217 b'write with no lock: "%s"' % path,
1217 b'write with no lock: "%s"' % path,
1218 stacklevel=3,
1218 stacklevel=3,
1219 config=b'check-locks',
1219 config=b'check-locks',
1220 )
1220 )
1221 elif repo._currentlock(repo._wlockref) is None:
1221 elif repo._currentlock(repo._wlockref) is None:
1222 # rest of vfs files are covered by 'wlock'
1222 # rest of vfs files are covered by 'wlock'
1223 #
1223 #
1224 # exclude special files
1224 # exclude special files
1225 for prefix in self._wlockfreeprefix:
1225 for prefix in self._wlockfreeprefix:
1226 if path.startswith(prefix):
1226 if path.startswith(prefix):
1227 return
1227 return
1228 repo.ui.develwarn(
1228 repo.ui.develwarn(
1229 b'write with no wlock: "%s"' % path,
1229 b'write with no wlock: "%s"' % path,
1230 stacklevel=3,
1230 stacklevel=3,
1231 config=b'check-locks',
1231 config=b'check-locks',
1232 )
1232 )
1233 return ret
1233 return ret
1234
1234
1235 return checkvfs
1235 return checkvfs
1236
1236
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs

        Wraps ``origfunc`` (an svfs access function) so that store writes
        performed without holding the repository lock are reported via
        ``develwarn``. The wrapper preserves ``origfunc``'s return value on
        the checked (write) path.
        """
        # weak reference so the ward does not keep the repository alive
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            # repo may be gone, or not far enough through __init__ to check
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            # read-only access never needs the lock
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs
1258
1258
    def close(self):
        """Close the repository, flushing in-memory caches to disk."""
        self._writecaches()
1261
1261
1262 def _writecaches(self):
1262 def _writecaches(self):
1263 if self._revbranchcache:
1263 if self._revbranchcache:
1264 self._revbranchcache.write()
1264 self._revbranchcache.write()
1265
1265
    def _restrictcapabilities(self, caps):
        """Filter/extend the peer capability set ``caps`` for this repo.

        When bundle2 advertising is enabled, adds a ``bundle2=`` capability
        carrying the url-quoted blob of this repo's client-role bundle2
        capabilities. Returns the (possibly copied) capability set.
        """
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            # copy before mutating: callers may share the input set
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps
1274
1274
    def _writerequirements(self):
        # persist self.requirements through scmutil (writes via self.vfs)
        scmutil.writerequires(self.vfs, self.requirements)
1277
1277
1278 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1278 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1279 # self -> auditor -> self._checknested -> self
1279 # self -> auditor -> self._checknested -> self
1280
1280
    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        # Deliberately a plain property (rebuilt on each access): caching
        # would create the self -> auditor -> self._checknested -> self
        # reference cycle noted above.
        return pathutil.pathauditor(self.root, callback=self._checknested)
1286
1286
    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        # realfs=False / cached=True: see pathutil.pathauditor for the
        # exact semantics of these flags.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )
1294
1294
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        ``path`` is an absolute filesystem path; returns True only when it
        falls inside a subrepository declared by the working copy.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # path is exactly a declared subrepo: legal
                    return True
                else:
                    # path lies deeper inside the subrepo: delegate the
                    # decision to the subrepo implementation
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                # try the next shorter prefix
                parts.pop()
        return False
1332
1332
    def peer(self):
        """Return a local peer wrapping this repository."""
        return localpeer(self)  # not cached to avoid reference cycle
1335
1335
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        # the plain local repository is already unfiltered
        return self
1341
1341
    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other word, there is always only one level of `repoview` "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            # append the per-repo extra filter id to the requested view name
            name = name + b'%' + self._extrafilterid

        # build the view on top of the *unfiltered* class (single level)
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
1359
1359
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        """The repository's bookmark store (a ``bookmarks.bmstore``).

        See the scheduling notes below for why the changelog is explicitly
        refreshed before building the store.
        """
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happening during read is not great, but it become worse
        # when this happen during write because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race free consistent read.
        # For this purpose data read from disc before locking are
        # "invalidated" right after the locks are taken. This invalidations are
        # "light", the `filecache` mechanism keep the data in memory and will
        # reuse them if the underlying files did not changed. Not parsing the
        # same data multiple times helps performances.
        #
        # Unfortunately in the case describe above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked file is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follow
        #
        # 1) filecache logic detect that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensure we have a changelog at least as recent as the
        # cache stat computed in (1). As a result at locking time:
        #  * if the changelog did not changed since (1) -> we can reuse the data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)
1417
1417
    def _refreshchangelog(self):
        """make sure the in memory changelog match the on-disk one"""
        # drop the cached changelog, but never while a transaction is open
        if b'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog
1422
1422
    @property
    def _activebookmark(self):
        # delegate to the bookmark store's notion of the active bookmark
        return self._bookmarks.active
1426
1426
1427 # _phasesets depend on changelog. what we need is to call
1427 # _phasesets depend on changelog. what we need is to call
1428 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1428 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1429 # can't be easily expressed in filecache mechanism.
1429 # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        # tracked on both phaseroots and the changelog -- see the note above
        # about why this cannot express the exact invalidation we want
        return phases.phasecache(self, self._phasedefaults)
1433
1433
    @storecache(b'obsstore')
    def obsstore(self):
        # obsolescence marker store for this repository
        return obsolete.makestore(self.ui, self)
1437
1437
    @storecache(b'00changelog.i')
    def changelog(self):
        # txnutil.mayhavepending presumably signals whether transaction
        # pending data may exist for this root -- see txnutil
        return self.store.changelog(txnutil.mayhavepending(self.root))
1441
1441
    @storecache(b'00manifest.i')
    def manifestlog(self):
        # built with the store narrow matcher so manifests honor the narrowspec
        return self.store.manifestlog(self, self._storenarrowmatch)
1445
1445
    @repofilecache(b'dirstate')
    def dirstate(self):
        # built through _makedirstate so extensions can wrap construction
        return self._makedirstate()
1449
1449
    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        # lambda defers building the sparse matcher until the dirstate asks
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )
1457
1457
    def _dirstatevalidate(self, node):
        """Validate a dirstate parent ``node`` against the changelog.

        Returns ``node`` when the changelog knows it; otherwise warns (at
        most once per repo instance) and returns ``nullid`` so the unknown
        working parent is ignored.
        """
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid
1470
1470
    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).

        Cached; invalidated when the narrowspec file changes (@storecache).
        """
        return narrowspec.load(self)
1478
1478
    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        """Matcher for the narrowspec as applied to the store."""
        if repository.NARROW_REQUIREMENT not in self.requirements:
            # not a narrow repository: everything matches
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)
1485
1485
    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        """Matcher for the narrowspec, validated against the working copy."""
        if repository.NARROW_REQUIREMENT not in self.requirements:
            # not a narrow repository: everything matches
            return matchmod.always()
        # unlike _storenarrowmatch, also validate the working-copy narrowspec
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)
1493
1493
    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
1512
1512
    def setnarrowpats(self, newincludes, newexcludes):
        # persist the new narrowspec, then drop every cache derived from it
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)
1516
1516
    def __getitem__(self, changeid):
        """Return the changectx (or workingctx) for ``changeid``.

        Accepted forms: ``None`` (working directory), an existing context
        (returned as-is), a slice of revision numbers, an integer revision,
        the symbolic names ``b'null'``/``b'tip'``/``b'.'``, a 20-byte binary
        node, or a 40-byte hex node.

        Raises error.FilteredRepoLookupError for filtered revisions and
        error.RepoLookupError for unknown ones.
        """
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'null':
                node = nullid
                rev = nullrev
            elif changeid == b'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                # binary node
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                # hex node
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, type(changeid))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            # fall back to the working context for wdir-only operations
            return context.workingctx(self)
1586
1586
1587 def __contains__(self, changeid):
1587 def __contains__(self, changeid):
1588 """True if the given changeid exists
1588 """True if the given changeid exists
1589
1589
1590 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1590 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1591 specified.
1591 specified.
1592 """
1592 """
1593 try:
1593 try:
1594 self[changeid]
1594 self[changeid]
1595 return True
1595 return True
1596 except error.RepoLookupError:
1596 except error.RepoLookupError:
1597 return False
1597 return False
1598
1598
    def __nonzero__(self):
        # a repository object is always truthy, even with no revisions
        return True

    # py3 spelling of the truthiness protocol
    __bool__ = __nonzero__
1603
1603
1604 def __len__(self):
1604 def __len__(self):
1605 # no need to pay the cost of repoview.changelog
1605 # no need to pay the cost of repoview.changelog
1606 unfi = self.unfiltered()
1606 unfi = self.unfiltered()
1607 return len(unfi.changelog)
1607 return len(unfi.changelog)
1608
1608
    def __iter__(self):
        # iterate the (possibly filtered) changelog
        return iter(self.changelog)
1611
1611
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        # parse once (applying %-escapes), then evaluate against this repo
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
1627
1627
1628 def set(self, expr, *args):
1628 def set(self, expr, *args):
1629 '''Find revisions matching a revset and emit changectx instances.
1629 '''Find revisions matching a revset and emit changectx instances.
1630
1630
1631 This is a convenience wrapper around ``revs()`` that iterates the
1631 This is a convenience wrapper around ``revs()`` that iterates the
1632 result and is a generator of changectx instances.
1632 result and is a generator of changectx instances.
1633
1633
1634 Revset aliases from the configuration are not expanded. To expand
1634 Revset aliases from the configuration are not expanded. To expand
1635 user aliases, consider calling ``scmutil.revrange()``.
1635 user aliases, consider calling ``scmutil.revrange()``.
1636 '''
1636 '''
1637 for r in self.revs(expr, *args):
1637 for r in self.revs(expr, *args):
1638 yield self[r]
1638 yield self[r]
1639
1639
    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            # no ui passed: user aliases are not expanded
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
1658
1658
    def url(self):
        # local repositories are addressed with a plain 'file:' url
        return b'file:' + self.root
1661
1661
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        # delegate to the hook module with this repo's ui and instance
        return hook.hook(self.ui, self, name, throw, **args)
1670
1670
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # additional derived caches, initialized to None here
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        # only tags/tagtypes are populated eagerly
        cache.tags, cache.tagtypes = self._findtags()

        return cache
1693
1693
1694 def tags(self):
1694 def tags(self):
1695 '''return a mapping of tag to node'''
1695 '''return a mapping of tag to node'''
1696 t = {}
1696 t = {}
1697 if self.changelog.filteredrevs:
1697 if self.changelog.filteredrevs:
1698 tags, tt = self._findtags()
1698 tags, tt = self._findtags()
1699 else:
1699 else:
1700 tags = self._tagscache.tags
1700 tags = self._tagscache.tags
1701 rev = self.changelog.rev
1701 rev = self.changelog.rev
1702 for k, v in pycompat.iteritems(tags):
1702 for k, v in pycompat.iteritems(tags):
1703 try:
1703 try:
1704 # ignore tags to unknown nodes
1704 # ignore tags to unknown nodes
1705 rev(v)
1705 rev(v)
1706 t[k] = v
1706 t[k] = v
1707 except (error.LookupError, ValueError):
1707 except (error.LookupError, ValueError):
1708 pass
1708 pass
1709 return t
1709 return t
1710
1710
1711 def _findtags(self):
1711 def _findtags(self):
1712 '''Do the hard work of finding tags. Return a pair of dicts
1712 '''Do the hard work of finding tags. Return a pair of dicts
1713 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1713 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1714 maps tag name to a string like \'global\' or \'local\'.
1714 maps tag name to a string like \'global\' or \'local\'.
1715 Subclasses or extensions are free to add their own tags, but
1715 Subclasses or extensions are free to add their own tags, but
1716 should be aware that the returned dicts will be retained for the
1716 should be aware that the returned dicts will be retained for the
1717 duration of the localrepo object.'''
1717 duration of the localrepo object.'''
1718
1718
1719 # XXX what tagtype should subclasses/extensions use? Currently
1719 # XXX what tagtype should subclasses/extensions use? Currently
1720 # mq and bookmarks add tags, but do not set the tagtype at all.
1720 # mq and bookmarks add tags, but do not set the tagtype at all.
1721 # Should each extension invent its own tag type? Should there
1721 # Should each extension invent its own tag type? Should there
1722 # be one tagtype for all such "virtual" tags? Or is the status
1722 # be one tagtype for all such "virtual" tags? Or is the status
1723 # quo fine?
1723 # quo fine?
1724
1724
1725 # map tag name to (node, hist)
1725 # map tag name to (node, hist)
1726 alltags = tagsmod.findglobaltags(self.ui, self)
1726 alltags = tagsmod.findglobaltags(self.ui, self)
1727 # map tag name to tag type
1727 # map tag name to tag type
1728 tagtypes = dict((tag, b'global') for tag in alltags)
1728 tagtypes = dict((tag, b'global') for tag in alltags)
1729
1729
1730 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1730 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1731
1731
1732 # Build the return dicts. Have to re-encode tag names because
1732 # Build the return dicts. Have to re-encode tag names because
1733 # the tags module always uses UTF-8 (in order not to lose info
1733 # the tags module always uses UTF-8 (in order not to lose info
1734 # writing to the cache), but the rest of Mercurial wants them in
1734 # writing to the cache), but the rest of Mercurial wants them in
1735 # local encoding.
1735 # local encoding.
1736 tags = {}
1736 tags = {}
1737 for (name, (node, hist)) in pycompat.iteritems(alltags):
1737 for (name, (node, hist)) in pycompat.iteritems(alltags):
1738 if node != nullid:
1738 if node != nullid:
1739 tags[encoding.tolocal(name)] = node
1739 tags[encoding.tolocal(name)] = node
1740 tags[b'tip'] = self.changelog.tip()
1740 tags[b'tip'] = self.changelog.tip()
1741 tagtypes = dict(
1741 tagtypes = dict(
1742 [
1742 [
1743 (encoding.tolocal(name), value)
1743 (encoding.tolocal(name), value)
1744 for (name, value) in pycompat.iteritems(tagtypes)
1744 for (name, value) in pycompat.iteritems(tagtypes)
1745 ]
1745 ]
1746 )
1746 )
1747 return (tags, tagtypes)
1747 return (tags, tagtypes)
1748
1748
1749 def tagtype(self, tagname):
1749 def tagtype(self, tagname):
1750 '''
1750 '''
1751 return the type of the given tag. result can be:
1751 return the type of the given tag. result can be:
1752
1752
1753 'local' : a local tag
1753 'local' : a local tag
1754 'global' : a global tag
1754 'global' : a global tag
1755 None : tag does not exist
1755 None : tag does not exist
1756 '''
1756 '''
1757
1757
1758 return self._tagscache.tagtypes.get(tagname)
1758 return self._tagscache.tagtypes.get(tagname)
1759
1759
1760 def tagslist(self):
1760 def tagslist(self):
1761 '''return a list of tags ordered by revision'''
1761 '''return a list of tags ordered by revision'''
1762 if not self._tagscache.tagslist:
1762 if not self._tagscache.tagslist:
1763 l = []
1763 l = []
1764 for t, n in pycompat.iteritems(self.tags()):
1764 for t, n in pycompat.iteritems(self.tags()):
1765 l.append((self.changelog.rev(n), t, n))
1765 l.append((self.changelog.rev(n), t, n))
1766 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1766 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1767
1767
1768 return self._tagscache.tagslist
1768 return self._tagscache.tagslist
1769
1769
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # Build the reverse mapping (node -> sorted list of tag names)
            # once and memoize it on the tags cache object.
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
1780
1780
1781 def nodebookmarks(self, node):
1781 def nodebookmarks(self, node):
1782 """return the list of bookmarks pointing to the specified node"""
1782 """return the list of bookmarks pointing to the specified node"""
1783 return self._bookmarks.names(node)
1783 return self._bookmarks.names(node)
1784
1784
1785 def branchmap(self):
1785 def branchmap(self):
1786 '''returns a dictionary {branch: [branchheads]} with branchheads
1786 '''returns a dictionary {branch: [branchheads]} with branchheads
1787 ordered by increasing revision number'''
1787 ordered by increasing revision number'''
1788 return self._branchcaches[self]
1788 return self._branchcaches[self]
1789
1789
    @unfilteredmethod
    def revbranchcache(self):
        # Lazily create and memoize the rev->branch cache; built from the
        # unfiltered repo (and the method itself runs unfiltered) so all
        # filtered views share one instance.
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
1795
1795
    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            # branchtip() raises KeyError for an unknown branch
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                # tolerated: fall through and implicitly return None
                pass
1811
1811
    def lookup(self, key):
        """Resolve revision symbol ``key`` to a changeset node.

        Raises RepoLookupError when the resolved context has no node.
        """
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node
1817
1817
1818 def lookupbranch(self, key):
1818 def lookupbranch(self, key):
1819 if self.branchmap().hasbranch(key):
1819 if self.branchmap().hasbranch(key):
1820 return key
1820 return key
1821
1821
1822 return scmutil.revsymbol(self, key).branch()
1822 return scmutil.revsymbol(self, key).branch()
1823
1823
1824 def known(self, nodes):
1824 def known(self, nodes):
1825 cl = self.changelog
1825 cl = self.changelog
1826 nm = cl.nodemap
1826 nm = cl.nodemap
1827 filtered = cl.filteredrevs
1827 filtered = cl.filteredrevs
1828 result = []
1828 result = []
1829 for n in nodes:
1829 for n in nodes:
1830 r = nm.get(n)
1830 r = nm.get(n)
1831 resp = not (r is None or r in filtered)
1831 resp = not (r is None or r in filtered)
1832 result.append(resp)
1832 result.append(resp)
1833 return result
1833 return result
1834
1834
    def local(self):
        # Return self to signal that this is a local repository object;
        # callers test the return value's truthiness (e.g. cancopy()).
        return self
1837
1837
1838 def publishing(self):
1838 def publishing(self):
1839 # it's safe (and desirable) to trust the publish flag unconditionally
1839 # it's safe (and desirable) to trust the publish flag unconditionally
1840 # so that we don't finalize changes shared between users via ssh or nfs
1840 # so that we don't finalize changes shared between users via ssh or nfs
1841 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1841 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1842
1842
    def cancopy(self):
        """Return whether this repository is safe to copy."""
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs
1851
1851
1852 def shared(self):
1852 def shared(self):
1853 '''the type of shared repository (None if not shared)'''
1853 '''the type of shared repository (None if not shared)'''
1854 if self.sharedpath != self.path:
1854 if self.sharedpath != self.path:
1855 return b'store'
1855 return b'store'
1856 return None
1856 return None
1857
1857
1858 def wjoin(self, f, *insidef):
1858 def wjoin(self, f, *insidef):
1859 return self.vfs.reljoin(self.root, f, *insidef)
1859 return self.vfs.reljoin(self.root, f, *insidef)
1860
1860
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents, fixing up copy records.

        ``copies`` returned by dirstate.setparents() lists files whose
        copy source needs adjusting; the dirstate cannot do that itself
        because it requires access to the parents' manifests.
        """
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                # Second parent dropped: forget copy records whose file
                # and source are both absent from the first parent.
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
1876
1876
    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        # Construct a file context for ``path`` bound to this repository.
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )
1883
1883
    def getcwd(self):
        # Delegate to the dirstate, which tracks the working directory.
        return self.dirstate.getcwd()
1886
1886
    def pathto(self, f, cwd=None):
        # Delegate path formatting to the dirstate (presumably renders
        # ``f`` relative to ``cwd`` -- see dirstate.pathto).
        return self.dirstate.pathto(f, cwd)
1889
1889
    def _loadfilter(self, filter):
        """Load and memoize the (matcher, function, params) triples for the
        given filter config section (b'encode' or b'decode')."""
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    # b'!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        # A registered in-process data filter takes
                        # precedence over spawning an external command.
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    # Fall back to running ``cmd`` as an external filter.
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
1913
1913
1914 def _filter(self, filterpats, filename, data):
1914 def _filter(self, filterpats, filename, data):
1915 for mf, fn, cmd in filterpats:
1915 for mf, fn, cmd in filterpats:
1916 if mf(filename):
1916 if mf(filename):
1917 self.ui.debug(b"filtering %s through %s\n" % (filename, cmd))
1917 self.ui.debug(b"filtering %s through %s\n" % (filename, cmd))
1918 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1918 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1919 break
1919 break
1920
1920
1921 return data
1921 return data
1922
1922
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # Filters applied when reading data out of the working directory
        # (see wread); configured in the [encode] section.
        return self._loadfilter(b'encode')
1926
1926
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # Filters applied when writing data into the working directory
        # (see wwrite/wwritedata); configured in the [decode] section.
        return self._loadfilter(b'decode')
1930
1930
    def adddatafilter(self, name, filter):
        # Register an in-process data filter; _loadfilter() prefers these
        # over external commands when a configured command starts with
        # ``name``.
        self._datafilters[name] = filter
1933
1933
1934 def wread(self, filename):
1934 def wread(self, filename):
1935 if self.wvfs.islink(filename):
1935 if self.wvfs.islink(filename):
1936 data = self.wvfs.readlink(filename)
1936 data = self.wvfs.readlink(filename)
1937 else:
1937 else:
1938 data = self.wvfs.read(filename)
1938 data = self.wvfs.read(filename)
1939 return self._filter(self._encodefilterpats, filename, data)
1939 return self._filter(self._encodefilterpats, filename, data)
1940
1940
    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns length of written (maybe decoded) data.

        ``flags`` may contain b'l' (symlink) and/or b'x' (executable).
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            # Symlink: ``data`` is the link target.
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                # Explicitly clear flags so a stale exec bit is removed.
                self.wvfs.setflags(filename, False, False)
        return len(data)
1958
1958
    def wwritedata(self, filename, data):
        # Apply the decode filters only, without touching the filesystem.
        return self._filter(self._decodefilterpats, filename, data)
1961
1961
1962 def currenttransaction(self):
1962 def currenttransaction(self):
1963 """return the current transaction or None if non exists"""
1963 """return the current transaction or None if non exists"""
1964 if self._transref:
1964 if self._transref:
1965 tr = self._transref()
1965 tr = self._transref()
1966 else:
1966 else:
1967 tr = None
1967 tr = None
1968
1968
1969 if tr and tr.running():
1969 if tr and tr.running():
1970 return tr
1970 return tr
1971 return None
1971 return None
1972
1972
1973 def transaction(self, desc, report=None):
1973 def transaction(self, desc, report=None):
1974 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1974 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1975 b'devel', b'check-locks'
1975 b'devel', b'check-locks'
1976 ):
1976 ):
1977 if self._currentlock(self._lockref) is None:
1977 if self._currentlock(self._lockref) is None:
1978 raise error.ProgrammingError(b'transaction requires locking')
1978 raise error.ProgrammingError(b'transaction requires locking')
1979 tr = self.currenttransaction()
1979 tr = self.currenttransaction()
1980 if tr is not None:
1980 if tr is not None:
1981 return tr.nest(name=desc)
1981 return tr.nest(name=desc)
1982
1982
1983 # abort here if the journal already exists
1983 # abort here if the journal already exists
1984 if self.svfs.exists(b"journal"):
1984 if self.svfs.exists(b"journal"):
1985 raise error.RepoError(
1985 raise error.RepoError(
1986 _(b"abandoned transaction found"),
1986 _(b"abandoned transaction found"),
1987 hint=_(b"run 'hg recover' to clean up transaction"),
1987 hint=_(b"run 'hg recover' to clean up transaction"),
1988 )
1988 )
1989
1989
1990 idbase = b"%.40f#%f" % (random.random(), time.time())
1990 idbase = b"%.40f#%f" % (random.random(), time.time())
1991 ha = hex(hashlib.sha1(idbase).digest())
1991 ha = hex(hashlib.sha1(idbase).digest())
1992 txnid = b'TXN:' + ha
1992 txnid = b'TXN:' + ha
1993 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
1993 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
1994
1994
1995 self._writejournal(desc)
1995 self._writejournal(desc)
1996 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1996 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1997 if report:
1997 if report:
1998 rp = report
1998 rp = report
1999 else:
1999 else:
2000 rp = self.ui.warn
2000 rp = self.ui.warn
2001 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2001 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2002 # we must avoid cyclic reference between repo and transaction.
2002 # we must avoid cyclic reference between repo and transaction.
2003 reporef = weakref.ref(self)
2003 reporef = weakref.ref(self)
2004 # Code to track tag movement
2004 # Code to track tag movement
2005 #
2005 #
2006 # Since tags are all handled as file content, it is actually quite hard
2006 # Since tags are all handled as file content, it is actually quite hard
2007 # to track these movement from a code perspective. So we fallback to a
2007 # to track these movement from a code perspective. So we fallback to a
2008 # tracking at the repository level. One could envision to track changes
2008 # tracking at the repository level. One could envision to track changes
2009 # to the '.hgtags' file through changegroup apply but that fails to
2009 # to the '.hgtags' file through changegroup apply but that fails to
2010 # cope with case where transaction expose new heads without changegroup
2010 # cope with case where transaction expose new heads without changegroup
2011 # being involved (eg: phase movement).
2011 # being involved (eg: phase movement).
2012 #
2012 #
2013 # For now, We gate the feature behind a flag since this likely comes
2013 # For now, We gate the feature behind a flag since this likely comes
2014 # with performance impacts. The current code run more often than needed
2014 # with performance impacts. The current code run more often than needed
2015 # and do not use caches as much as it could. The current focus is on
2015 # and do not use caches as much as it could. The current focus is on
2016 # the behavior of the feature so we disable it by default. The flag
2016 # the behavior of the feature so we disable it by default. The flag
2017 # will be removed when we are happy with the performance impact.
2017 # will be removed when we are happy with the performance impact.
2018 #
2018 #
2019 # Once this feature is no longer experimental move the following
2019 # Once this feature is no longer experimental move the following
2020 # documentation to the appropriate help section:
2020 # documentation to the appropriate help section:
2021 #
2021 #
2022 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2022 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2023 # tags (new or changed or deleted tags). In addition the details of
2023 # tags (new or changed or deleted tags). In addition the details of
2024 # these changes are made available in a file at:
2024 # these changes are made available in a file at:
2025 # ``REPOROOT/.hg/changes/tags.changes``.
2025 # ``REPOROOT/.hg/changes/tags.changes``.
2026 # Make sure you check for HG_TAG_MOVED before reading that file as it
2026 # Make sure you check for HG_TAG_MOVED before reading that file as it
2027 # might exist from a previous transaction even if no tag were touched
2027 # might exist from a previous transaction even if no tag were touched
2028 # in this one. Changes are recorded in a line base format::
2028 # in this one. Changes are recorded in a line base format::
2029 #
2029 #
2030 # <action> <hex-node> <tag-name>\n
2030 # <action> <hex-node> <tag-name>\n
2031 #
2031 #
2032 # Actions are defined as follow:
2032 # Actions are defined as follow:
2033 # "-R": tag is removed,
2033 # "-R": tag is removed,
2034 # "+A": tag is added,
2034 # "+A": tag is added,
2035 # "-M": tag is moved (old value),
2035 # "-M": tag is moved (old value),
2036 # "+M": tag is moved (new value),
2036 # "+M": tag is moved (new value),
2037 tracktags = lambda x: None
2037 tracktags = lambda x: None
2038 # experimental config: experimental.hook-track-tags
2038 # experimental config: experimental.hook-track-tags
2039 shouldtracktags = self.ui.configbool(
2039 shouldtracktags = self.ui.configbool(
2040 b'experimental', b'hook-track-tags'
2040 b'experimental', b'hook-track-tags'
2041 )
2041 )
2042 if desc != b'strip' and shouldtracktags:
2042 if desc != b'strip' and shouldtracktags:
2043 oldheads = self.changelog.headrevs()
2043 oldheads = self.changelog.headrevs()
2044
2044
2045 def tracktags(tr2):
2045 def tracktags(tr2):
2046 repo = reporef()
2046 repo = reporef()
2047 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2047 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2048 newheads = repo.changelog.headrevs()
2048 newheads = repo.changelog.headrevs()
2049 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2049 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2050 # notes: we compare lists here.
2050 # notes: we compare lists here.
2051 # As we do it only once buiding set would not be cheaper
2051 # As we do it only once buiding set would not be cheaper
2052 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2052 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2053 if changes:
2053 if changes:
2054 tr2.hookargs[b'tag_moved'] = b'1'
2054 tr2.hookargs[b'tag_moved'] = b'1'
2055 with repo.vfs(
2055 with repo.vfs(
2056 b'changes/tags.changes', b'w', atomictemp=True
2056 b'changes/tags.changes', b'w', atomictemp=True
2057 ) as changesfile:
2057 ) as changesfile:
2058 # note: we do not register the file to the transaction
2058 # note: we do not register the file to the transaction
2059 # because we needs it to still exist on the transaction
2059 # because we needs it to still exist on the transaction
2060 # is close (for txnclose hooks)
2060 # is close (for txnclose hooks)
2061 tagsmod.writediff(changesfile, changes)
2061 tagsmod.writediff(changesfile, changes)
2062
2062
2063 def validate(tr2):
2063 def validate(tr2):
2064 """will run pre-closing hooks"""
2064 """will run pre-closing hooks"""
2065 # XXX the transaction API is a bit lacking here so we take a hacky
2065 # XXX the transaction API is a bit lacking here so we take a hacky
2066 # path for now
2066 # path for now
2067 #
2067 #
2068 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2068 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2069 # dict is copied before these run. In addition we needs the data
2069 # dict is copied before these run. In addition we needs the data
2070 # available to in memory hooks too.
2070 # available to in memory hooks too.
2071 #
2071 #
2072 # Moreover, we also need to make sure this runs before txnclose
2072 # Moreover, we also need to make sure this runs before txnclose
2073 # hooks and there is no "pending" mechanism that would execute
2073 # hooks and there is no "pending" mechanism that would execute
2074 # logic only if hooks are about to run.
2074 # logic only if hooks are about to run.
2075 #
2075 #
2076 # Fixing this limitation of the transaction is also needed to track
2076 # Fixing this limitation of the transaction is also needed to track
2077 # other families of changes (bookmarks, phases, obsolescence).
2077 # other families of changes (bookmarks, phases, obsolescence).
2078 #
2078 #
2079 # This will have to be fixed before we remove the experimental
2079 # This will have to be fixed before we remove the experimental
2080 # gating.
2080 # gating.
2081 tracktags(tr2)
2081 tracktags(tr2)
2082 repo = reporef()
2082 repo = reporef()
2083
2083
2084 r = repo.ui.configsuboptions(
2084 r = repo.ui.configsuboptions(
2085 b'experimental', b'single-head-per-branch'
2085 b'experimental', b'single-head-per-branch'
2086 )
2086 )
2087 singlehead, singleheadsub = r
2087 singlehead, singleheadsub = r
2088 if singlehead:
2088 if singlehead:
2089 accountclosed = singleheadsub.get(
2089 accountclosed = singleheadsub.get(
2090 b"account-closed-heads", False
2090 b"account-closed-heads", False
2091 )
2091 )
2092 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2092 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2093 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2093 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2094 for name, (old, new) in sorted(
2094 for name, (old, new) in sorted(
2095 tr.changes[b'bookmarks'].items()
2095 tr.changes[b'bookmarks'].items()
2096 ):
2096 ):
2097 args = tr.hookargs.copy()
2097 args = tr.hookargs.copy()
2098 args.update(bookmarks.preparehookargs(name, old, new))
2098 args.update(bookmarks.preparehookargs(name, old, new))
2099 repo.hook(
2099 repo.hook(
2100 b'pretxnclose-bookmark',
2100 b'pretxnclose-bookmark',
2101 throw=True,
2101 throw=True,
2102 **pycompat.strkwargs(args)
2102 **pycompat.strkwargs(args)
2103 )
2103 )
2104 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2104 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2105 cl = repo.unfiltered().changelog
2105 cl = repo.unfiltered().changelog
2106 for rev, (old, new) in tr.changes[b'phases'].items():
2106 for rev, (old, new) in tr.changes[b'phases'].items():
2107 args = tr.hookargs.copy()
2107 args = tr.hookargs.copy()
2108 node = hex(cl.node(rev))
2108 node = hex(cl.node(rev))
2109 args.update(phases.preparehookargs(node, old, new))
2109 args.update(phases.preparehookargs(node, old, new))
2110 repo.hook(
2110 repo.hook(
2111 b'pretxnclose-phase',
2111 b'pretxnclose-phase',
2112 throw=True,
2112 throw=True,
2113 **pycompat.strkwargs(args)
2113 **pycompat.strkwargs(args)
2114 )
2114 )
2115
2115
2116 repo.hook(
2116 repo.hook(
2117 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2117 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2118 )
2118 )
2119
2119
2120 def releasefn(tr, success):
2120 def releasefn(tr, success):
2121 repo = reporef()
2121 repo = reporef()
2122 if repo is None:
2122 if repo is None:
2123 # If the repo has been GC'd (and this release function is being
2123 # If the repo has been GC'd (and this release function is being
2124 # called from transaction.__del__), there's not much we can do,
2124 # called from transaction.__del__), there's not much we can do,
2125 # so just leave the unfinished transaction there and let the
2125 # so just leave the unfinished transaction there and let the
2126 # user run `hg recover`.
2126 # user run `hg recover`.
2127 return
2127 return
2128 if success:
2128 if success:
2129 # this should be explicitly invoked here, because
2129 # this should be explicitly invoked here, because
2130 # in-memory changes aren't written out at closing
2130 # in-memory changes aren't written out at closing
2131 # transaction, if tr.addfilegenerator (via
2131 # transaction, if tr.addfilegenerator (via
2132 # dirstate.write or so) isn't invoked while
2132 # dirstate.write or so) isn't invoked while
2133 # transaction running
2133 # transaction running
2134 repo.dirstate.write(None)
2134 repo.dirstate.write(None)
2135 else:
2135 else:
2136 # discard all changes (including ones already written
2136 # discard all changes (including ones already written
2137 # out) in this transaction
2137 # out) in this transaction
2138 narrowspec.restorebackup(self, b'journal.narrowspec')
2138 narrowspec.restorebackup(self, b'journal.narrowspec')
2139 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2139 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2140 repo.dirstate.restorebackup(None, b'journal.dirstate')
2140 repo.dirstate.restorebackup(None, b'journal.dirstate')
2141
2141
2142 repo.invalidate(clearfilecache=True)
2142 repo.invalidate(clearfilecache=True)
2143
2143
2144 tr = transaction.transaction(
2144 tr = transaction.transaction(
2145 rp,
2145 rp,
2146 self.svfs,
2146 self.svfs,
2147 vfsmap,
2147 vfsmap,
2148 b"journal",
2148 b"journal",
2149 b"undo",
2149 b"undo",
2150 aftertrans(renames),
2150 aftertrans(renames),
2151 self.store.createmode,
2151 self.store.createmode,
2152 validator=validate,
2152 validator=validate,
2153 releasefn=releasefn,
2153 releasefn=releasefn,
2154 checkambigfiles=_cachedfiles,
2154 checkambigfiles=_cachedfiles,
2155 name=desc,
2155 name=desc,
2156 )
2156 )
2157 tr.changes[b'origrepolen'] = len(self)
2157 tr.changes[b'origrepolen'] = len(self)
2158 tr.changes[b'obsmarkers'] = set()
2158 tr.changes[b'obsmarkers'] = set()
2159 tr.changes[b'phases'] = {}
2159 tr.changes[b'phases'] = {}
2160 tr.changes[b'bookmarks'] = {}
2160 tr.changes[b'bookmarks'] = {}
2161
2161
2162 tr.hookargs[b'txnid'] = txnid
2162 tr.hookargs[b'txnid'] = txnid
2163 tr.hookargs[b'txnname'] = desc
2163 tr.hookargs[b'txnname'] = desc
2164 # note: writing the fncache only during finalize mean that the file is
2164 # note: writing the fncache only during finalize mean that the file is
2165 # outdated when running hooks. As fncache is used for streaming clone,
2165 # outdated when running hooks. As fncache is used for streaming clone,
2166 # this is not expected to break anything that happen during the hooks.
2166 # this is not expected to break anything that happen during the hooks.
2167 tr.addfinalize(b'flush-fncache', self.store.write)
2167 tr.addfinalize(b'flush-fncache', self.store.write)
2168
2168
2169 def txnclosehook(tr2):
2169 def txnclosehook(tr2):
2170 """To be run if transaction is successful, will schedule a hook run
2170 """To be run if transaction is successful, will schedule a hook run
2171 """
2171 """
2172 # Don't reference tr2 in hook() so we don't hold a reference.
2172 # Don't reference tr2 in hook() so we don't hold a reference.
2173 # This reduces memory consumption when there are multiple
2173 # This reduces memory consumption when there are multiple
2174 # transactions per lock. This can likely go away if issue5045
2174 # transactions per lock. This can likely go away if issue5045
2175 # fixes the function accumulation.
2175 # fixes the function accumulation.
2176 hookargs = tr2.hookargs
2176 hookargs = tr2.hookargs
2177
2177
2178 def hookfunc():
2178 def hookfunc():
2179 repo = reporef()
2179 repo = reporef()
2180 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2180 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2181 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2181 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2182 for name, (old, new) in bmchanges:
2182 for name, (old, new) in bmchanges:
2183 args = tr.hookargs.copy()
2183 args = tr.hookargs.copy()
2184 args.update(bookmarks.preparehookargs(name, old, new))
2184 args.update(bookmarks.preparehookargs(name, old, new))
2185 repo.hook(
2185 repo.hook(
2186 b'txnclose-bookmark',
2186 b'txnclose-bookmark',
2187 throw=False,
2187 throw=False,
2188 **pycompat.strkwargs(args)
2188 **pycompat.strkwargs(args)
2189 )
2189 )
2190
2190
2191 if hook.hashook(repo.ui, b'txnclose-phase'):
2191 if hook.hashook(repo.ui, b'txnclose-phase'):
2192 cl = repo.unfiltered().changelog
2192 cl = repo.unfiltered().changelog
2193 phasemv = sorted(tr.changes[b'phases'].items())
2193 phasemv = sorted(tr.changes[b'phases'].items())
2194 for rev, (old, new) in phasemv:
2194 for rev, (old, new) in phasemv:
2195 args = tr.hookargs.copy()
2195 args = tr.hookargs.copy()
2196 node = hex(cl.node(rev))
2196 node = hex(cl.node(rev))
2197 args.update(phases.preparehookargs(node, old, new))
2197 args.update(phases.preparehookargs(node, old, new))
2198 repo.hook(
2198 repo.hook(
2199 b'txnclose-phase',
2199 b'txnclose-phase',
2200 throw=False,
2200 throw=False,
2201 **pycompat.strkwargs(args)
2201 **pycompat.strkwargs(args)
2202 )
2202 )
2203
2203
2204 repo.hook(
2204 repo.hook(
2205 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2205 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2206 )
2206 )
2207
2207
2208 reporef()._afterlock(hookfunc)
2208 reporef()._afterlock(hookfunc)
2209
2209
2210 tr.addfinalize(b'txnclose-hook', txnclosehook)
2210 tr.addfinalize(b'txnclose-hook', txnclosehook)
2211 # Include a leading "-" to make it happen before the transaction summary
2211 # Include a leading "-" to make it happen before the transaction summary
2212 # reports registered via scmutil.registersummarycallback() whose names
2212 # reports registered via scmutil.registersummarycallback() whose names
2213 # are 00-txnreport etc. That way, the caches will be warm when the
2213 # are 00-txnreport etc. That way, the caches will be warm when the
2214 # callbacks run.
2214 # callbacks run.
2215 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2215 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2216
2216
2217 def txnaborthook(tr2):
2217 def txnaborthook(tr2):
2218 """To be run if transaction is aborted
2218 """To be run if transaction is aborted
2219 """
2219 """
2220 reporef().hook(
2220 reporef().hook(
2221 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2221 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2222 )
2222 )
2223
2223
2224 tr.addabort(b'txnabort-hook', txnaborthook)
2224 tr.addabort(b'txnabort-hook', txnaborthook)
2225 # avoid eager cache invalidation. in-memory data should be identical
2225 # avoid eager cache invalidation. in-memory data should be identical
2226 # to stored data if transaction has no error.
2226 # to stored data if transaction has no error.
2227 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2227 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2228 self._transref = weakref.ref(tr)
2228 self._transref = weakref.ref(tr)
2229 scmutil.registersummarycallback(self, tr, desc)
2229 scmutil.registersummarycallback(self, tr, desc)
2230 return tr
2230 return tr
2231
2231
2232 def _journalfiles(self):
2232 def _journalfiles(self):
2233 return (
2233 return (
2234 (self.svfs, b'journal'),
2234 (self.svfs, b'journal'),
2235 (self.svfs, b'journal.narrowspec'),
2235 (self.svfs, b'journal.narrowspec'),
2236 (self.vfs, b'journal.narrowspec.dirstate'),
2236 (self.vfs, b'journal.narrowspec.dirstate'),
2237 (self.vfs, b'journal.dirstate'),
2237 (self.vfs, b'journal.dirstate'),
2238 (self.vfs, b'journal.branch'),
2238 (self.vfs, b'journal.branch'),
2239 (self.vfs, b'journal.desc'),
2239 (self.vfs, b'journal.desc'),
2240 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2240 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2241 (self.svfs, b'journal.phaseroots'),
2241 (self.svfs, b'journal.phaseroots'),
2242 )
2242 )
2243
2243
2244 def undofiles(self):
2244 def undofiles(self):
2245 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2245 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2246
2246
    @unfilteredmethod
    def _writejournal(self, desc):
        """Write backup copies of volatile repository state as 'journal.*'.

        These files allow an interrupted transaction to be cleaned up by
        `hg recover`; at transaction close they are renamed to 'undo.*' so
        that `hg rollback` can restore the pre-transaction state.
        ``desc`` is a short description of the running operation.
        """
        # dirstate and narrowspec have in-memory state; use their dedicated
        # backup helpers rather than copying the files directly.
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        # record the pre-transaction changelog length plus the operation
        # description (read back by _rollback for messages and safety checks)
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        # tryread: both files may legitimately not exist yet
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2261
2261
2262 def recover(self):
2262 def recover(self):
2263 with self.lock():
2263 with self.lock():
2264 if self.svfs.exists(b"journal"):
2264 if self.svfs.exists(b"journal"):
2265 self.ui.status(_(b"rolling back interrupted transaction\n"))
2265 self.ui.status(_(b"rolling back interrupted transaction\n"))
2266 vfsmap = {
2266 vfsmap = {
2267 b'': self.svfs,
2267 b'': self.svfs,
2268 b'plain': self.vfs,
2268 b'plain': self.vfs,
2269 }
2269 }
2270 transaction.rollback(
2270 transaction.rollback(
2271 self.svfs,
2271 self.svfs,
2272 vfsmap,
2272 vfsmap,
2273 b"journal",
2273 b"journal",
2274 self.ui.warn,
2274 self.ui.warn,
2275 checkambigfiles=_cachedfiles,
2275 checkambigfiles=_cachedfiles,
2276 )
2276 )
2277 self.invalidate()
2277 self.invalidate()
2278 return True
2278 return True
2279 else:
2279 else:
2280 self.ui.warn(_(b"no interrupted transaction available\n"))
2280 self.ui.warn(_(b"no interrupted transaction available\n"))
2281 return False
2281 return False
2282
2282
    def rollback(self, dryrun=False, force=False):
        """Undo the most recent transaction recorded in the 'undo.*' files.

        ``dryrun`` only reports what would happen; ``force`` skips the
        safety check against losing an uncommitted working copy. Returns 0
        on success and 1 when there is no rollback information.
        """
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                # the dirstate guard backs up the working copy state so a
                # failed rollback cannot leave it half-updated
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            # release in reverse acquisition order; any of these may be None
            release(dsguard, lock, wlock)
2297
2297
    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """Implementation of rollback(); callers hold the locks and the
        dirstate guard.

        Reads 'undo.desc' to describe what is being undone, performs the
        store rollback, then restores working-copy state when the dirstate
        parents were stripped. Returns 0 (including for a dry run).
        """
        ui = self.ui
        try:
            # undo.desc format: "<old repo length>\n<operation>[\n<detail>]"
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            # undo.desc missing or unreadable: old or foreign transaction
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        # undoing a commit while checked out elsewhere can lose the commit's
        # working copy changes; require --force in that case
        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        # capture dirstate parents before the strip so we can tell whether
        # the working copy parent revisions were removed
        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = any(p not in self.changelog.nodemap for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                # undo.branch absent: keep whatever branch is current
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            # any in-progress merge state refers to stripped revisions
            mergemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
2389
2389
2390 def _buildcacheupdater(self, newtransaction):
2390 def _buildcacheupdater(self, newtransaction):
2391 """called during transaction to build the callback updating cache
2391 """called during transaction to build the callback updating cache
2392
2392
2393 Lives on the repository to help extension who might want to augment
2393 Lives on the repository to help extension who might want to augment
2394 this logic. For this purpose, the created transaction is passed to the
2394 this logic. For this purpose, the created transaction is passed to the
2395 method.
2395 method.
2396 """
2396 """
2397 # we must avoid cyclic reference between repo and transaction.
2397 # we must avoid cyclic reference between repo and transaction.
2398 reporef = weakref.ref(self)
2398 reporef = weakref.ref(self)
2399
2399
2400 def updater(tr):
2400 def updater(tr):
2401 repo = reporef()
2401 repo = reporef()
2402 repo.updatecaches(tr)
2402 repo.updatecaches(tr)
2403
2403
2404 return updater
2404 return updater
2405
2405
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        # no transaction info, or the transaction added new revisions
        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()
            # walking every revision populates the rev-branch cache
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warm the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)
2454
2454
    def invalidatecaches(self):
        """Drop in-memory caches derived from repository history
        (tags, branch caches, filtered revision sets, sparse signatures)."""
        if r'_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__[r'_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()
2464
2464
    def invalidatevolatilesets(self):
        """Drop caches that depend on repository filtering and
        obsolescence markers."""
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
2468
2468
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, r'dirstate'):
            # drop every cached file property on the dirstate so the next
            # access re-checks the on-disk state
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    # property was never materialized; nothing to drop
                    pass
            # the dirstate filecache entry lives on the unfiltered repo
            delattr(self.unfiltered(), r'dirstate')
2485
2485
    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).

        With ``clearfilecache``, the filecache entries themselves are
        removed, forcing a full reload rather than a staleness check.
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        # iterate over a copy: entries may be deleted during the loop
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                # cached property was never materialized on the instance
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()
2521
2521
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()
2528
2528
2529 @unfilteredmethod
2529 @unfilteredmethod
2530 def _refreshfilecachestats(self, tr):
2530 def _refreshfilecachestats(self, tr):
2531 """Reload stats of cached files so that they are flagged as valid"""
2531 """Reload stats of cached files so that they are flagged as valid"""
2532 for k, ce in self._filecache.items():
2532 for k, ce in self._filecache.items():
2533 k = pycompat.sysstr(k)
2533 k = pycompat.sysstr(k)
2534 if k == r'dirstate' or k not in self.__dict__:
2534 if k == r'dirstate' or k not in self.__dict__:
2535 continue
2535 continue
2536 ce.refresh()
2536 ce.refresh()
2537
2537
    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        """Acquire and return the lock file ``lockname`` inside ``vfs``.

        ``wait`` selects between blocking (honoring the ``ui.timeout`` and
        ``ui.timeout.warn`` settings) and failing immediately when the lock
        is busy. ``releasefn``/``acquirefn`` are callbacks invoked on
        release/acquisition; ``desc`` is used in user-facing messages.
        """
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        # a zero timeout means "fail immediately" in the lock layer
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l
2577
2577
2578 def _afterlock(self, callback):
2578 def _afterlock(self, callback):
2579 """add a callback to be run when the repository is fully unlocked
2579 """add a callback to be run when the repository is fully unlocked
2580
2580
2581 The callback will be executed when the outermost lock is released
2581 The callback will be executed when the outermost lock is released
2582 (with wlock being higher level than 'lock')."""
2582 (with wlock being higher level than 'lock')."""
2583 for ref in (self._wlockref, self._lockref):
2583 for ref in (self._wlockref, self._lockref):
2584 l = ref and ref()
2584 l = ref and ref()
2585 if l and l.held:
2585 if l and l.held:
2586 l.postrelease.append(callback)
2586 l.postrelease.append(callback)
2587 break
2587 break
2588 else: # no lock have been found.
2588 else: # no lock have been found.
2589 callback()
2589 callback()
2590
2590
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        # re-enter an already-held lock: lock() bumps its nesting level
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            # dropping in-memory caches on acquisition picks up any changes
            # another process made while we were unlocked
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        # keep only a weakref so an undropped lock can be garbage collected
        self._lockref = weakref.ref(l)
        return l
2613
2613
2614 def _wlockchecktransaction(self):
2614 def _wlockchecktransaction(self):
2615 if self.currenttransaction() is not None:
2615 if self.currenttransaction() is not None:
2616 raise error.LockInheritanceContractViolation(
2616 raise error.LockInheritanceContractViolation(
2617 b'wlock cannot be inherited in the middle of a transaction'
2617 b'wlock cannot be inherited in the middle of a transaction'
2618 )
2618 )
2619
2619
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        # re-enter an already-held wlock: lock() bumps its nesting level
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            # taking wlock after lock violates the documented ordering
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            # flush (or discard) pending dirstate changes on final release
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            # mark the on-disk dirstate as clean so later reads trust it
            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        # keep only a weakref so an undropped lock can be garbage collected
        self._wlockref = weakref.ref(l)
        return l
2662
2662
2663 def _currentlock(self, lockref):
2663 def _currentlock(self, lockref):
2664 """Returns the lock if it's held, or None if it's not."""
2664 """Returns the lock if it's held, or None if it's not."""
2665 if lockref is None:
2665 if lockref is None:
2666 return None
2666 return None
2667 l = lockref()
2667 l = lockref()
2668 if l is None or not l.held:
2668 if l is None or not l.held:
2669 return None
2669 return None
2670 return l
2670 return l
2671
2671
2672 def currentwlock(self):
2672 def currentwlock(self):
2673 """Returns the wlock if it's held, or None if it's not."""
2673 """Returns the wlock if it's held, or None if it's not."""
2674 return self._currentlock(self._wlockref)
2674 return self._currentlock(self._wlockref)
2675
2675
    def _filecommit(
        self,
        fctx,
        manifest1,
        manifest2,
        linkrev,
        tr,
        changelist,
        includecopymeta,
    ):
        """
        commit an individual file as part of a larger transaction

        fctx: file context for the revision being committed; it may or may
            not be a plain ``context.filectx`` (see the isinstance check
            below, which gates filelog-entry reuse)
        manifest1, manifest2: the two parent manifests
        linkrev: changelog revision this filelog entry will link to
        tr: transaction the filelog write belongs to
        changelist: list of changed file names, mutated in place
        includecopymeta: when True, copy/copyrev metadata is stored in the
            filelog entry itself

        Returns the filelog node for this file: a newly added node, a
        reused node, or the (possibly adjusted) first parent when nothing
        changed.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # real filectx: the filelog entry may already exist and can be
            # reused when it matches one of the parents
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug(b'reusing %s filelog entry\n' % fname)
                # even when reusing the node, a flags-only change still has
                # to be reported in the changed-files list
                if (
                    fparent1 != nullid
                    and manifest1.flags(fname) != fctx.flags()
                ) or (
                    fparent2 != nullid
                    and manifest2.flags(fname) != fctx.flags()
                ):
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2:  # branch merge
                if fparent2 == nullid or cnode is None:  # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if cnode:
                self.ui.debug(
                    b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
                )
                if includecopymeta:
                    meta[b"copy"] = cfname
                    meta[b"copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(
                    _(
                        b"warning: can't find ancestor for '%s' "
                        b"copied from '%s'!\n"
                    )
                    % (fname, cfname)
                )

        elif fparent1 == nullid:
            # file is new on p1's side: make p2's entry the first parent
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
2785
2785
2786 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2786 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2787 """check for commit arguments that aren't committable"""
2787 """check for commit arguments that aren't committable"""
2788 if match.isexact() or match.prefix():
2788 if match.isexact() or match.prefix():
2789 matched = set(status.modified + status.added + status.removed)
2789 matched = set(status.modified + status.added + status.removed)
2790
2790
2791 for f in match.files():
2791 for f in match.files():
2792 f = self.dirstate.normalize(f)
2792 f = self.dirstate.normalize(f)
2793 if f == b'.' or f in matched or f in wctx.substate:
2793 if f == b'.' or f in matched or f in wctx.substate:
2794 continue
2794 continue
2795 if f in status.deleted:
2795 if f in status.deleted:
2796 fail(f, _(b'file not found!'))
2796 fail(f, _(b'file not found!'))
2797 if f in vdirs: # visited directory
2797 if f in vdirs: # visited directory
2798 d = f + b'/'
2798 d = f + b'/'
2799 for mf in matched:
2799 for mf in matched:
2800 if mf.startswith(d):
2800 if mf.startswith(d):
2801 break
2801 break
2802 else:
2802 else:
2803 fail(f, _(b"no match under directory!"))
2803 fail(f, _(b"no match under directory!"))
2804 elif f not in self.dirstate:
2804 elif f not in self.dirstate:
2805 fail(f, _(b"file not tracked!"))
2805 fail(f, _(b"file not tracked!"))
2806
2806
    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=False,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the node of the new changeset, or None when there is
        nothing to commit and empty commits are not allowed.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            # record explicitly-matched directories so they can be validated
            # by checkcommitpatterns() below
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            # internal config: ui.allowemptycommit
            allowemptycommit = (
                wctx.branch() != wctx.p1().branch()
                or extra.get(b'close')
                or merge
                or cctx.files()
                or self.ui.configbool(b'ui', b'allowemptycommit')
            )
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                # point the user at the saved message before propagating,
                # so an edited message is not lost on rollback
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                raise

            def commithook():
                # hack for command that use a temporary commit (eg: histedit)
                # temporary commit got stripped before hook release
                if self.changelog.hasnode(ret):
                    self.hook(
                        b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                    )

            # run the "commit" hook only after all locks are released
            self._afterlock(commithook)
            return ret
2938
2938
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().

        origctx is for convert to work around the problem that bug
        fixes to the files list in changesets change hashes. For
        convert to be the identity, it can pass an origctx and this
        function will use the same files list when it makes sense to
        do so.

        When ``error`` is true, a warning is emitted even for missing-file
        IOErrors (see the IOError handler below).

        Returns the node of the new changeset.
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        # decide where copy metadata is recorded (filelog entries,
        # changeset extras, sidedata, or a combination)
        if self.filecopiesmode == b'changeset-sidedata':
            writechangesetcopy = True
            writefilecopymeta = True
            writecopiesto = None
        else:
            writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
            writefilecopymeta = writecopiesto != b'changeset-only'
            writechangesetcopy = writecopiesto in (
                b'changeset-only',
                b'compatibility',
            )
        p1copies, p2copies = None, None
        if writechangesetcopy:
            p1copies = ctx.p1copies()
            p2copies = ctx.p2copies()
        filesadded, filesremoved = None, None
        with self.lock(), self.transaction(b"commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug(b'reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
                if writechangesetcopy:
                    filesadded = ctx.filesadded()
                    filesremoved = ctx.filesremoved()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_(b"committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + b"\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(
                                fctx,
                                m1,
                                m2,
                                linkrev,
                                trp,
                                changed,
                                writefilecopymeta,
                            )
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(
                            _(b"trouble committing %s!\n") % uipathfn(f)
                        )
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(
                                _(b"trouble committing %s!\n") % uipathfn(f)
                            )
                        raise

                # update manifest
                removed = [f for f in removed if f in m1 or f in m2]
                drop = sorted([f for f in removed if f in m])
                for f in drop:
                    del m[f]
                if p2.rev() != nullrev:

                    @util.cachefunc
                    def mas():
                        p1n = p1.node()
                        p2n = p2.node()
                        cahs = self.changelog.commonancestorsheads(p1n, p2n)
                        if not cahs:
                            cahs = [nullrev]
                        return [self[r].manifest() for r in cahs]

                    def deletionfromparent(f):
                        # When a file is removed relative to p1 in a merge, this
                        # function determines whether the absence is due to a
                        # deletion from a parent, or whether the merge commit
                        # itself deletes the file. We decide this by doing a
                        # simplified three way merge of the manifest entry for
                        # the file. There are two ways we decide the merge
                        # itself didn't delete a file:
                        # - neither parent (nor the merge) contain the file
                        # - exactly one parent contains the file, and that
                        #   parent has the same filelog entry as the merge
                        #   ancestor (or all of them if there two). In other
                        #   words, that parent left the file unchanged while the
                        #   other one deleted it.
                        # One way to think about this is that deleting a file is
                        # similar to emptying it, so the list of changed files
                        # should be similar either way. The computation
                        # described above is not done directly in _filecommit
                        # when creating the list of changed files, however
                        # it does something very similar by comparing filelog
                        # nodes.
                        if f in m1:
                            return f not in m2 and all(
                                f in ma and ma.find(f) == m1.find(f)
                                for ma in mas()
                            )
                        elif f in m2:
                            return all(
                                f in ma and ma.find(f) == m2.find(f)
                                for ma in mas()
                            )
                        else:
                            return True

                    removed = [f for f in removed if not deletionfromparent(f)]

                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                    if not files and md:
                        self.ui.debug(
                            b'not reusing manifest (no file change in '
                            b'changelog, but manifest differs)\n'
                        )
                if files or md:
                    self.ui.note(_(b"committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(
                        trp,
                        linkrev,
                        p1.manifestnode(),
                        p2.manifestnode(),
                        added,
                        drop,
                        match=self.narrowmatch(),
                    )

                    if writechangesetcopy:
                        filesadded = [
                            f for f in changed if not (f in m1 or f in m2)
                        ]
                        filesremoved = removed
                else:
                    self.ui.debug(
                        b'reusing manifest from p1 (listed files '
                        b'actually unchanged)\n'
                    )
                    mn = p1.manifestnode()
            else:
                self.ui.debug(b'reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            if writecopiesto == b'changeset-only':
                # If writing only to changeset extras, use None to indicate that
                # no entry should be written. If writing to both, write an empty
                # entry to prevent the reader from falling back to reading
                # filelogs.
                p1copies = p1copies or None
                p2copies = p2copies or None
                filesadded = filesadded or None
                filesremoved = filesremoved or None

            if origctx and origctx.manifestnode() == mn:
                # keep convert an identity transform: reuse the original
                # files list when the resulting manifest is unchanged
                files = origctx.files()

            # update changelog
            self.ui.note(_(b"committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(
                mn,
                files,
                ctx.description(),
                trp,
                p1.node(),
                p2.node(),
                user,
                ctx.date(),
                ctx.extra().copy(),
                p1copies,
                p2copies,
                filesadded,
                filesremoved,
            )
            xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
            self.hook(
                b'pretxncommit',
                throw=True,
                node=hex(n),
                parent1=xp1,
                parent2=xp2,
            )
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            return n
3181
3181
@unfilteredmethod
def destroying(self):
    '''Inform the repository that nodes are about to be destroyed.
    Intended for use by strip and rollback, so there's a common
    place for anything that has to be done before destroying history.

    This is mostly useful for saving state that is in memory and waiting
    to be flushed when the current lock is released. Because a call to
    destroyed is imminent, the repo will be invalidated causing those
    changes to stay in memory (waiting for the next unlock), or vanish
    completely.
    '''
    # When using the same lock to commit and strip, the phasecache is left
    # dirty after committing. Then when we strip, the repo is invalidated,
    # causing those changes to disappear.
    if b'_phasecache' in vars(self):
        # vars() (not attribute access) avoids triggering a lazy filecache
        # load just to flush it -- only write a cache that already exists
        self._phasecache.write()
3199
3199
@unfilteredmethod
def destroyed(self):
    '''Inform the repository that nodes have been destroyed.
    Intended for use by strip and rollback, so there's a common
    place for anything that has to be done after destroying history.
    '''
    # When one tries to:
    # 1) destroy nodes thus calling this method (e.g. strip)
    # 2) use phasecache somewhere (e.g. commit)
    #
    # then 2) will fail because the phasecache contains nodes that were
    # removed. We can either remove phasecache from the filecache,
    # causing it to reload next time it is accessed, or simply filter
    # the removed nodes now and write the updated cache.
    self._phasecache.filterunknown(self)
    self._phasecache.write()

    # refresh all repository caches
    self.updatecaches()

    # Ensure the persistent tag cache is updated. Doing it now
    # means that the tag cache only has to worry about destroyed
    # heads immediately after a strip/rollback. That in turn
    # guarantees that "cachetip == currenttip" (comparing both rev
    # and node) always means no nodes have been added or destroyed.

    # XXX this is suboptimal when qrefresh'ing: we strip the current
    # head, refresh the tag cache, then immediately add a new head.
    # But I think doing it this way is necessary for the "instant
    # tag cache retrieval" case to work.
    self.invalidate()
3231
3231
def status(
    self,
    node1=b'.',
    node2=None,
    match=None,
    ignored=False,
    clean=False,
    unknown=False,
    listsubrepos=False,
):
    '''a convenience method that calls node1.status(node2)'''
    # Resolve the first revision to a context, then delegate entirely.
    ctx1 = self[node1]
    return ctx1.status(node2, match, ignored, clean, unknown, listsubrepos)
3246
3246
def addpostdsstatus(self, ps):
    """Register *ps* to run within the wlock, at the point at which status
    fixups happen.

    On status completion, callback(wctx, status) will be called with the
    wlock held, unless the dirstate has changed from underneath or the
    wlock couldn't be grabbed.

    Callbacks should not capture and use a cached copy of the dirstate --
    it might change in the meanwhile. Instead, they should access the
    dirstate via wctx.repo().dirstate.

    This list is emptied out after each status run -- extensions should
    make sure they add to this list each time dirstate.status is called.
    Extensions should also make sure they don't call this for statuses
    that don't involve the dirstate.
    """
    # The list lives on the repo for uniqueness reasons -- it is actually
    # managed by the workingctx, but that isn't unique per-repo.
    self._postdsstatus.append(ps)
3268
3268
def postdsstatus(self):
    """Return the list of post-dirstate-status hooks (used by workingctx)."""
    return self._postdsstatus
3272
3272
def clearpostdsstatus(self):
    """Empty the post-dirstate-status hook list in place (used by workingctx)."""
    # in-place clear so existing references to the list stay valid
    self._postdsstatus.clear()
3276
3276
def heads(self, start=None):
    """Return head nodes, newest first.

    Without *start*: all changelog heads, in descending revision order.
    With *start*: only heads reachable from *start*, sorted by revision
    descending.
    """
    cl = self.changelog
    if start is None:
        # headrevs() yields ascending revs; reverse for newest-first
        return [cl.node(rev) for rev in reversed(cl.headrevs())]
    reachable = cl.heads(start)
    return sorted(reachable, key=cl.rev, reverse=True)
3286
3286
def branchheads(self, branch=None, start=None, closed=False):
    '''return a (possibly filtered) list of heads for the given branch

    Heads are returned in topological order, from newest to oldest.
    If branch is None, use the dirstate branch.
    If start is not None, return only heads reachable from start.
    If closed is True, return heads that are marked as closed as well.
    '''
    if branch is None:
        branch = self[None].branch()
    bmap = self.branchmap()
    if not bmap.hasbranch(branch):
        return []
    # the cache returns heads ordered lowest to highest; flip for
    # newest-first output
    result = list(reversed(bmap.branchheads(branch, closed=closed)))
    if start is not None:
        # keep only the heads that can be reached from startrev
        reachable = set(self.changelog.nodesbetween([start], result)[2])
        result = [h for h in result if h in reachable]
    return result
3307
3307
def branches(self, nodes):
    # For each starting node, follow the first-parent chain downward and
    # emit a (tipmost, bottom, p1, p2) tuple for the linear run, stopping
    # at a merge (second parent set) or at the root (first parent null).
    if not nodes:
        # default to the repository tip when no nodes are given
        nodes = [self.changelog.tip()]
    b = []
    for n in nodes:
        t = n  # remember the top of this linear run
        while True:
            p = self.changelog.parents(n)
            if p[1] != nullid or p[0] == nullid:
                # merge or root reached: record the run and stop
                b.append((t, n, p[0], p[1]))
                break
            n = p[0]
    return b
3321
3321
def between(self, pairs):
    # For each (top, bottom) pair, walk the first-parent chain from top
    # toward bottom and record the nodes at exponentially spaced steps
    # (1, 2, 4, 8, ...). Used by the legacy wire protocol to sample the
    # graph between two known nodes.
    r = []

    for top, bottom in pairs:
        n, l, i = top, [], 0
        f = 1  # next step index at which to record a node

        while n != bottom and n != nullid:
            p = self.changelog.parents(n)[0]
            if i == f:
                l.append(n)
                f = f * 2  # double the sampling interval
            n = p
            i += 1

        r.append(l)

    return r
3340
3340
def checkpush(self, pushop):
    """Extensions can override this function if additional checks have
    to be performed before pushing, or call it if they override push
    command.
    """
    # intentionally a no-op: this is an extension hook point
3346
3346
@unfilteredpropertycache
def prepushoutgoinghooks(self):
    """Return util.hooks consists of a pushop with repo, remote, outgoing
    methods, which are called before pushing changesets.
    """
    # created lazily and cached on the unfiltered repo by the decorator,
    # so all callers share one hooks container
    return util.hooks()
3353
3353
def pushkey(self, namespace, key, old, new):
    """Update *key* in *namespace* from *old* to *new*, running hooks.

    Fires the ``prepushkey`` hook first (aborting returns False), then
    performs the push and schedules the ``pushkey`` hook to run after
    the lock is released. Returns the pushkey.push() result, or False
    when the prepushkey hook aborts.
    """
    try:
        # seed hook args from the current transaction, when one is open
        tr = self.currenttransaction()
        hookargs = {}
        if tr is not None:
            hookargs.update(tr.hookargs)
        hookargs = pycompat.strkwargs(hookargs)
        hookargs[r'namespace'] = namespace
        hookargs[r'key'] = key
        hookargs[r'old'] = old
        hookargs[r'new'] = new
        self.hook(b'prepushkey', throw=True, **hookargs)
    except error.HookAbort as exc:
        # a hook vetoed the push: report and signal failure to the caller
        self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
        if exc.hint:
            self.ui.write_err(_(b"(%s)\n") % exc.hint)
        return False
    self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
    ret = pushkey.push(self, namespace, key, old, new)

    def runhook():
        # post-update notification hook, carrying the push result
        self.hook(
            b'pushkey',
            namespace=namespace,
            key=key,
            old=old,
            new=new,
            ret=ret,
        )

    # defer the notification until the lock is released
    self._afterlock(runhook)
    return ret
3386
3386
def listkeys(self, namespace):
    """List the pushkey entries in *namespace*, wrapped in hooks.

    Fires ``prelistkeys`` (which may abort) before listing and
    ``listkeys`` with the results afterwards.
    """
    self.hook(b'prelistkeys', throw=True, namespace=namespace)
    self.ui.debug(b'listing keys for "%s"\n' % namespace)
    result = pushkey.list(self, namespace)
    self.hook(b'listkeys', namespace=namespace, values=result)
    return result
3393
3393
def debugwireargs(self, one, two, three=None, four=None, five=None):
    '''used to test argument passing over the wire'''
    # the optional arguments are coerced to bytes so None round-trips
    # visibly in the echoed string
    parts = (
        one,
        two,
        pycompat.bytestr(three),
        pycompat.bytestr(four),
        pycompat.bytestr(five),
    )
    return b"%s %s %s %s %s" % parts
3403
3403
def savecommitmessage(self, text):
    """Write *text* to 'last-message.txt' in the repo vfs.

    Returns the file's path relative to the working directory (via
    self.pathto), suitable for showing to the user.
    """
    fp = self.vfs(b'last-message.txt', b'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
    relpath = fp.name[len(self.root) + 1 :]
    return self.pathto(relpath)
3411
3411
3412
3412
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback performing the queued (vfs, src, dest) renames.

    The tuples are copied eagerly so the returned closure holds no
    reference back to *files*.
    """
    pending = [tuple(entry) for entry in files]

    def dorenames():
        for vfs, src, dest in pending:
            # if src and dest refer to the same file, vfs.rename would be
            # a no-op leaving both on disk -- remove dest first so the
            # rename can never be such a no-op
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:
                # the journal file does not exist yet
                pass

    return dorenames
3429
3429
3430
3430
def undoname(fn):
    """Map a journal file path to the corresponding undo file path.

    The basename must start with b'journal'; only that first occurrence
    is replaced with b'undo'.
    """
    directory, name = os.path.split(fn)
    assert name.startswith(b'journal')
    undo = name.replace(b'journal', b'undo', 1)
    return os.path.join(directory, undo)
3435
3435
3436
3436
def instance(ui, path, create, intents=None, createopts=None):
    """Open the local repository at *path*, creating it first if *create*."""
    repopath = util.urllocalpath(path)
    if create:
        createrepository(ui, repopath, createopts=createopts)
    return makelocalrepository(ui, repopath, intents=intents)
3443
3443
3444
3444
def islocal(path):
    """This repository type is always local, whatever *path* is."""
    return True
3447
3447
3448
3448
def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    # copy so the caller's dict is never mutated
    opts = dict(createopts or {})

    if b'backend' not in opts:
        # experimental config: storage.new-repo-backend
        opts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return opts
3462
3462
3463
3463
def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(b'relshared')
        else:
            requirements.add(b'shared')

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    # only the revlogv1 backend is currently supported here
    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    # NOTE(review): upstream nests these -- fncache implies store,
    # dotencode implies fncache
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengine = ui.config(b'format', b'revlog-compression')
    if compengine not in util.compengines:
        raise error.Abort(
            _(
                b'compression engine %s defined by '
                b'format.revlog-compression not available'
            )
            % compengine,
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    elif compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        # copies-in-sidedata implies the plain sidedata requirement too
        requirements.add(SIDEDATA_REQUIREMENT)
        requirements.add(COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(b'treemanifest')

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(b'internal-phase')

    if createopts.get(b'narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    return requirements
3559
3559
3560
3560
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    unknown = {}
    for key, value in createopts.items():
        if key not in known:
            unknown[key] = value
    return unknown
3586
3586
3587
3587
3588 def createrepository(ui, path, createopts=None):
3588 def createrepository(ui, path, createopts=None):
3589 """Create a new repository in a vfs.
3589 """Create a new repository in a vfs.
3590
3590
3591 ``path`` path to the new repo's working directory.
3591 ``path`` path to the new repo's working directory.
3592 ``createopts`` options for the new repository.
3592 ``createopts`` options for the new repository.
3593
3593
3594 The following keys for ``createopts`` are recognized:
3594 The following keys for ``createopts`` are recognized:
3595
3595
3596 backend
3596 backend
3597 The storage backend to use.
3597 The storage backend to use.
3598 lfs
3598 lfs
3599 Repository will be created with ``lfs`` requirement. The lfs extension
3599 Repository will be created with ``lfs`` requirement. The lfs extension
3600 will automatically be loaded when the repository is accessed.
3600 will automatically be loaded when the repository is accessed.
3601 narrowfiles
3601 narrowfiles
3602 Set up repository to support narrow file storage.
3602 Set up repository to support narrow file storage.
3603 sharedrepo
3603 sharedrepo
3604 Repository object from which storage should be shared.
3604 Repository object from which storage should be shared.
3605 sharedrelative
3605 sharedrelative
3606 Boolean indicating if the path to the shared repo should be
3606 Boolean indicating if the path to the shared repo should be
3607 stored as relative. By default, the pointer to the "parent" repo
3607 stored as relative. By default, the pointer to the "parent" repo
3608 is stored as an absolute path.
3608 is stored as an absolute path.
3609 shareditems
3609 shareditems
3610 Set of items to share to the new repository (in addition to storage).
3610 Set of items to share to the new repository (in addition to storage).
3611 shallowfilestore
3611 shallowfilestore
3612 Indicates that storage for files should be shallow (not all ancestor
3612 Indicates that storage for files should be shallow (not all ancestor
3613 revisions are known).
3613 revisions are known).
3614 """
3614 """
3615 createopts = defaultcreateopts(ui, createopts=createopts)
3615 createopts = defaultcreateopts(ui, createopts=createopts)
3616
3616
3617 unknownopts = filterknowncreateopts(ui, createopts)
3617 unknownopts = filterknowncreateopts(ui, createopts)
3618
3618
3619 if not isinstance(unknownopts, dict):
3619 if not isinstance(unknownopts, dict):
3620 raise error.ProgrammingError(
3620 raise error.ProgrammingError(
3621 b'filterknowncreateopts() did not return a dict'
3621 b'filterknowncreateopts() did not return a dict'
3622 )
3622 )
3623
3623
3624 if unknownopts:
3624 if unknownopts:
3625 raise error.Abort(
3625 raise error.Abort(
3626 _(
3626 _(
3627 b'unable to create repository because of unknown '
3627 b'unable to create repository because of unknown '
3628 b'creation option: %s'
3628 b'creation option: %s'
3629 )
3629 )
3630 % b', '.join(sorted(unknownopts)),
3630 % b', '.join(sorted(unknownopts)),
3631 hint=_(b'is a required extension not loaded?'),
3631 hint=_(b'is a required extension not loaded?'),
3632 )
3632 )
3633
3633
3634 requirements = newreporequirements(ui, createopts=createopts)
3634 requirements = newreporequirements(ui, createopts=createopts)
3635
3635
3636 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3636 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3637
3637
3638 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3638 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3639 if hgvfs.exists():
3639 if hgvfs.exists():
3640 raise error.RepoError(_(b'repository %s already exists') % path)
3640 raise error.RepoError(_(b'repository %s already exists') % path)
3641
3641
3642 if b'sharedrepo' in createopts:
3642 if b'sharedrepo' in createopts:
3643 sharedpath = createopts[b'sharedrepo'].sharedpath
3643 sharedpath = createopts[b'sharedrepo'].sharedpath
3644
3644
3645 if createopts.get(b'sharedrelative'):
3645 if createopts.get(b'sharedrelative'):
3646 try:
3646 try:
3647 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3647 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3648 except (IOError, ValueError) as e:
3648 except (IOError, ValueError) as e:
3649 # ValueError is raised on Windows if the drive letters differ
3649 # ValueError is raised on Windows if the drive letters differ
3650 # on each path.
3650 # on each path.
3651 raise error.Abort(
3651 raise error.Abort(
3652 _(b'cannot calculate relative path'),
3652 _(b'cannot calculate relative path'),
3653 hint=stringutil.forcebytestr(e),
3653 hint=stringutil.forcebytestr(e),
3654 )
3654 )
3655
3655
3656 if not wdirvfs.exists():
3656 if not wdirvfs.exists():
3657 wdirvfs.makedirs()
3657 wdirvfs.makedirs()
3658
3658
3659 hgvfs.makedir(notindexed=True)
3659 hgvfs.makedir(notindexed=True)
3660 if b'sharedrepo' not in createopts:
3660 if b'sharedrepo' not in createopts:
3661 hgvfs.mkdir(b'cache')
3661 hgvfs.mkdir(b'cache')
3662 hgvfs.mkdir(b'wcache')
3662 hgvfs.mkdir(b'wcache')
3663
3663
3664 if b'store' in requirements and b'sharedrepo' not in createopts:
3664 if b'store' in requirements and b'sharedrepo' not in createopts:
3665 hgvfs.mkdir(b'store')
3665 hgvfs.mkdir(b'store')
3666
3666
3667 # We create an invalid changelog outside the store so very old
3667 # We create an invalid changelog outside the store so very old
3668 # Mercurial versions (which didn't know about the requirements
3668 # Mercurial versions (which didn't know about the requirements
3669 # file) encounter an error on reading the changelog. This
3669 # file) encounter an error on reading the changelog. This
3670 # effectively locks out old clients and prevents them from
3670 # effectively locks out old clients and prevents them from
3671 # mucking with a repo in an unknown format.
3671 # mucking with a repo in an unknown format.
3672 #
3672 #
3673 # The revlog header has version 2, which won't be recognized by
3673 # The revlog header has version 2, which won't be recognized by
3674 # such old clients.
3674 # such old clients.
3675 hgvfs.append(
3675 hgvfs.append(
3676 b'00changelog.i',
3676 b'00changelog.i',
3677 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3677 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3678 b'layout',
3678 b'layout',
3679 )
3679 )
3680
3680
3681 scmutil.writerequires(hgvfs, requirements)
3681 scmutil.writerequires(hgvfs, requirements)
3682
3682
3683 # Write out file telling readers where to find the shared store.
3683 # Write out file telling readers where to find the shared store.
3684 if b'sharedrepo' in createopts:
3684 if b'sharedrepo' in createopts:
3685 hgvfs.write(b'sharedpath', sharedpath)
3685 hgvfs.write(b'sharedpath', sharedpath)
3686
3686
3687 if createopts.get(b'shareditems'):
3687 if createopts.get(b'shareditems'):
3688 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3688 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3689 hgvfs.write(b'shared', shared)
3689 hgvfs.write(b'shared', shared)
3690
3690
3691
3691
3692 def poisonrepository(repo):
3692 def poisonrepository(repo):
3693 """Poison a repository instance so it can no longer be used."""
3693 """Poison a repository instance so it can no longer be used."""
3694 # Perform any cleanup on the instance.
3694 # Perform any cleanup on the instance.
3695 repo.close()
3695 repo.close()
3696
3696
3697 # Our strategy is to replace the type of the object with one that
3697 # Our strategy is to replace the type of the object with one that
3698 # has all attribute lookups result in error.
3698 # has all attribute lookups result in error.
3699 #
3699 #
3700 # But we have to allow the close() method because some constructors
3700 # But we have to allow the close() method because some constructors
3701 # of repos call close() on repo references.
3701 # of repos call close() on repo references.
3702 class poisonedrepository(object):
3702 class poisonedrepository(object):
3703 def __getattribute__(self, item):
3703 def __getattribute__(self, item):
3704 if item == r'close':
3704 if item == r'close':
3705 return object.__getattribute__(self, item)
3705 return object.__getattribute__(self, item)
3706
3706
3707 raise error.ProgrammingError(
3707 raise error.ProgrammingError(
3708 b'repo instances should not be used after unshare'
3708 b'repo instances should not be used after unshare'
3709 )
3709 )
3710
3710
3711 def close(self):
3711 def close(self):
3712 pass
3712 pass
3713
3713
3714 # We may have a repoview, which intercepts __setattr__. So be sure
3714 # We may have a repoview, which intercepts __setattr__. So be sure
3715 # we operate at the lowest level possible.
3715 # we operate at the lowest level possible.
3716 object.__setattr__(repo, r'__class__', poisonedrepository)
3716 object.__setattr__(repo, r'__class__', poisonedrepository)
@@ -1,540 +1,540 b''
1 #testcases extra sidedata
1 #testcases extra sidedata
2
2
3 #if extra
3 #if extra
4 $ cat >> $HGRCPATH << EOF
4 $ cat >> $HGRCPATH << EOF
5 > [experimental]
5 > [experimental]
6 > copies.write-to=changeset-only
6 > copies.write-to=changeset-only
7 > copies.read-from=changeset-only
7 > copies.read-from=changeset-only
8 > [alias]
8 > [alias]
9 > changesetcopies = log -r . -T 'files: {files}
9 > changesetcopies = log -r . -T 'files: {files}
10 > {extras % "{ifcontains("files", key, "{key}: {value}\n")}"}
10 > {extras % "{ifcontains("files", key, "{key}: {value}\n")}"}
11 > {extras % "{ifcontains("copies", key, "{key}: {value}\n")}"}'
11 > {extras % "{ifcontains("copies", key, "{key}: {value}\n")}"}'
12 > EOF
12 > EOF
13 #endif
13 #endif
14
14
15 #if sidedata
15 #if sidedata
16 $ cat >> $HGRCPATH << EOF
16 $ cat >> $HGRCPATH << EOF
17 > [format]
17 > [format]
18 > exp-use-copies-side-data-changeset = yes
18 > exp-use-copies-side-data-changeset = yes
19 > EOF
19 > EOF
20 #endif
20 #endif
21
21
22 $ cat >> $HGRCPATH << EOF
22 $ cat >> $HGRCPATH << EOF
23 > [alias]
23 > [alias]
24 > showcopies = log -r . -T '{file_copies % "{source} -> {name}\n"}'
24 > showcopies = log -r . -T '{file_copies % "{source} -> {name}\n"}'
25 > [extensions]
25 > [extensions]
26 > rebase =
26 > rebase =
27 > split =
27 > split =
28 > EOF
28 > EOF
29
29
30 Check that copies are recorded correctly
30 Check that copies are recorded correctly
31
31
32 $ hg init repo
32 $ hg init repo
33 $ cd repo
33 $ cd repo
34 #if sidedata
34 #if sidedata
35 $ hg debugformat -v
35 $ hg debugformat -v
36 format-variant repo config default
36 format-variant repo config default
37 fncache: yes yes yes
37 fncache: yes yes yes
38 dotencode: yes yes yes
38 dotencode: yes yes yes
39 generaldelta: yes yes yes
39 generaldelta: yes yes yes
40 sparserevlog: yes yes yes
40 sparserevlog: yes yes yes
41 sidedata: yes yes no
41 sidedata: yes yes no
42 copies-sdc: yes yes no
42 copies-sdc: yes yes no
43 plain-cl-delta: yes yes yes
43 plain-cl-delta: yes yes yes
44 compression: zlib zlib zlib
44 compression: zlib zlib zlib
45 compression-level: default default default
45 compression-level: default default default
46 #else
46 #else
47 $ hg debugformat -v
47 $ hg debugformat -v
48 format-variant repo config default
48 format-variant repo config default
49 fncache: yes yes yes
49 fncache: yes yes yes
50 dotencode: yes yes yes
50 dotencode: yes yes yes
51 generaldelta: yes yes yes
51 generaldelta: yes yes yes
52 sparserevlog: yes yes yes
52 sparserevlog: yes yes yes
53 sidedata: no no no
53 sidedata: no no no
54 copies-sdc: no no no
54 copies-sdc: no no no
55 plain-cl-delta: yes yes yes
55 plain-cl-delta: yes yes yes
56 compression: zlib zlib zlib
56 compression: zlib zlib zlib
57 compression-level: default default default
57 compression-level: default default default
58 #endif
58 #endif
59 $ echo a > a
59 $ echo a > a
60 $ hg add a
60 $ hg add a
61 $ hg ci -m initial
61 $ hg ci -m initial
62 $ hg cp a b
62 $ hg cp a b
63 $ hg cp a c
63 $ hg cp a c
64 $ hg cp a d
64 $ hg cp a d
65 $ hg ci -m 'copy a to b, c, and d'
65 $ hg ci -m 'copy a to b, c, and d'
66
66
67 #if extra
67 #if extra
68
68
69 $ hg changesetcopies
69 $ hg changesetcopies
70 files: b c d
70 files: b c d
71 filesadded: 0
71 filesadded: 0
72 1
72 1
73 2
73 2
74
74
75 p1copies: 0\x00a (esc)
75 p1copies: 0\x00a (esc)
76 1\x00a (esc)
76 1\x00a (esc)
77 2\x00a (esc)
77 2\x00a (esc)
78 #else
78 #else
79 $ hg debugsidedata -c -v -- -1
79 $ hg debugsidedata -c -v -- -1
80 4 sidedata entries
80 4 sidedata entries
81 entry-0010 size 11
81 entry-0010 size 11
82 '0\x00a\n1\x00a\n2\x00a'
82 '0\x00a\n1\x00a\n2\x00a'
83 entry-0011 size 0
83 entry-0011 size 0
84 ''
84 ''
85 entry-0012 size 5
85 entry-0012 size 5
86 '0\n1\n2'
86 '0\n1\n2'
87 entry-0013 size 0
87 entry-0013 size 0
88 ''
88 ''
89 #endif
89 #endif
90
90
91 $ hg showcopies
91 $ hg showcopies
92 a -> b
92 a -> b
93 a -> c
93 a -> c
94 a -> d
94 a -> d
95
95
96 #if extra
96 #if extra
97
97
98 $ hg showcopies --config experimental.copies.read-from=compatibility
98 $ hg showcopies --config experimental.copies.read-from=compatibility
99 a -> b
99 a -> b
100 a -> c
100 a -> c
101 a -> d
101 a -> d
102 $ hg showcopies --config experimental.copies.read-from=filelog-only
102 $ hg showcopies --config experimental.copies.read-from=filelog-only
103
103
104 #endif
104 #endif
105
105
106 Check that renames are recorded correctly
106 Check that renames are recorded correctly
107
107
108 $ hg mv b b2
108 $ hg mv b b2
109 $ hg ci -m 'rename b to b2'
109 $ hg ci -m 'rename b to b2'
110
110
111 #if extra
111 #if extra
112
112
113 $ hg changesetcopies
113 $ hg changesetcopies
114 files: b b2
114 files: b b2
115 filesadded: 1
115 filesadded: 1
116 filesremoved: 0
116 filesremoved: 0
117
117
118 p1copies: 1\x00b (esc)
118 p1copies: 1\x00b (esc)
119
119
120 #else
120 #else
121 $ hg debugsidedata -c -v -- -1
121 $ hg debugsidedata -c -v -- -1
122 4 sidedata entries
122 4 sidedata entries
123 entry-0010 size 3
123 entry-0010 size 3
124 '1\x00b'
124 '1\x00b'
125 entry-0011 size 0
125 entry-0011 size 0
126 ''
126 ''
127 entry-0012 size 1
127 entry-0012 size 1
128 '1'
128 '1'
129 entry-0013 size 1
129 entry-0013 size 1
130 '0'
130 '0'
131 #endif
131 #endif
132
132
133 $ hg showcopies
133 $ hg showcopies
134 b -> b2
134 b -> b2
135
135
136
136
137 Rename onto existing file. This should get recorded in the changeset files list and in the extras,
137 Rename onto existing file. This should get recorded in the changeset files list and in the extras,
138 even though there is no filelog entry.
138 even though there is no filelog entry.
139
139
140 $ hg cp b2 c --force
140 $ hg cp b2 c --force
141 $ hg st --copies
141 $ hg st --copies
142 M c
142 M c
143 b2
143 b2
144
144
145 #if extra
145 #if extra
146
146
147 $ hg debugindex c
147 $ hg debugindex c
148 rev linkrev nodeid p1 p2
148 rev linkrev nodeid p1 p2
149 0 1 b789fdd96dc2 000000000000 000000000000
149 0 1 b789fdd96dc2 000000000000 000000000000
150
150
151 #else
151 #else
152
152
153 $ hg debugindex c
153 $ hg debugindex c
154 rev linkrev nodeid p1 p2
154 rev linkrev nodeid p1 p2
155 0 1 37d9b5d994ea 000000000000 000000000000
155 0 1 37d9b5d994ea 000000000000 000000000000
156
156
157 #endif
157 #endif
158
158
159
159
160 $ hg ci -m 'move b onto d'
160 $ hg ci -m 'move b onto d'
161
161
162 #if extra
162 #if extra
163
163
164 $ hg changesetcopies
164 $ hg changesetcopies
165 files: c
165 files: c
166
166
167 p1copies: 0\x00b2 (esc)
167 p1copies: 0\x00b2 (esc)
168
168
169 #else
169 #else
170 $ hg debugsidedata -c -v -- -1
170 $ hg debugsidedata -c -v -- -1
171 4 sidedata entries
171 4 sidedata entries
172 entry-0010 size 4
172 entry-0010 size 4
173 '0\x00b2'
173 '0\x00b2'
174 entry-0011 size 0
174 entry-0011 size 0
175 ''
175 ''
176 entry-0012 size 0
176 entry-0012 size 0
177 ''
177 ''
178 entry-0013 size 0
178 entry-0013 size 0
179 ''
179 ''
180 #endif
180 #endif
181
181
182 $ hg showcopies
182 $ hg showcopies
183 b2 -> c
183 b2 -> c
184
184
185 #if extra
185 #if extra
186
186
187 $ hg debugindex c
187 $ hg debugindex c
188 rev linkrev nodeid p1 p2
188 rev linkrev nodeid p1 p2
189 0 1 b789fdd96dc2 000000000000 000000000000
189 0 1 b789fdd96dc2 000000000000 000000000000
190
190
191 #else
191 #else
192
192
193 $ hg debugindex c
193 $ hg debugindex c
194 rev linkrev nodeid p1 p2
194 rev linkrev nodeid p1 p2
195 0 1 37d9b5d994ea 000000000000 000000000000
195 0 1 37d9b5d994ea 000000000000 000000000000
196 1 3 029625640347 000000000000 000000000000
196 1 3 029625640347 000000000000 000000000000
197
197
198 #endif
198 #endif
199
199
200 Create a merge commit with copying done during merge.
200 Create a merge commit with copying done during merge.
201
201
202 $ hg co 0
202 $ hg co 0
203 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
203 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
204 $ hg cp a e
204 $ hg cp a e
205 $ hg cp a f
205 $ hg cp a f
206 $ hg ci -m 'copy a to e and f'
206 $ hg ci -m 'copy a to e and f'
207 created new head
207 created new head
208 $ hg merge 3
208 $ hg merge 3
209 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
209 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
210 (branch merge, don't forget to commit)
210 (branch merge, don't forget to commit)
211 File 'a' exists on both sides, so 'g' could be recorded as being from p1 or p2, but we currently
211 File 'a' exists on both sides, so 'g' could be recorded as being from p1 or p2, but we currently
212 always record it as being from p1
212 always record it as being from p1
213 $ hg cp a g
213 $ hg cp a g
214 File 'd' exists only in p2, so 'h' should be from p2
214 File 'd' exists only in p2, so 'h' should be from p2
215 $ hg cp d h
215 $ hg cp d h
216 File 'f' exists only in p1, so 'i' should be from p1
216 File 'f' exists only in p1, so 'i' should be from p1
217 $ hg cp f i
217 $ hg cp f i
218 $ hg ci -m 'merge'
218 $ hg ci -m 'merge'
219
219
220 #if extra
220 #if extra
221
221
222 $ hg changesetcopies
222 $ hg changesetcopies
223 files: g h i
223 files: g h i
224 filesadded: 0
224 filesadded: 0
225 1
225 1
226 2
226 2
227
227
228 p1copies: 0\x00a (esc)
228 p1copies: 0\x00a (esc)
229 2\x00f (esc)
229 2\x00f (esc)
230 p2copies: 1\x00d (esc)
230 p2copies: 1\x00d (esc)
231
231
232 #else
232 #else
233 $ hg debugsidedata -c -v -- -1
233 $ hg debugsidedata -c -v -- -1
234 4 sidedata entries
234 4 sidedata entries
235 entry-0010 size 7
235 entry-0010 size 7
236 '0\x00a\n2\x00f'
236 '0\x00a\n2\x00f'
237 entry-0011 size 3
237 entry-0011 size 3
238 '1\x00d'
238 '1\x00d'
239 entry-0012 size 5
239 entry-0012 size 5
240 '0\n1\n2'
240 '0\n1\n2'
241 entry-0013 size 0
241 entry-0013 size 0
242 ''
242 ''
243 #endif
243 #endif
244
244
245 $ hg showcopies
245 $ hg showcopies
246 a -> g
246 a -> g
247 d -> h
247 d -> h
248 f -> i
248 f -> i
249
249
250 Test writing to both changeset and filelog
250 Test writing to both changeset and filelog
251
251
252 $ hg cp a j
252 $ hg cp a j
253 #if extra
253 #if extra
254 $ hg ci -m 'copy a to j' --config experimental.copies.write-to=compatibility
254 $ hg ci -m 'copy a to j' --config experimental.copies.write-to=compatibility
255 $ hg changesetcopies
255 $ hg changesetcopies
256 files: j
256 files: j
257 filesadded: 0
257 filesadded: 0
258 filesremoved:
258 filesremoved:
259
259
260 p1copies: 0\x00a (esc)
260 p1copies: 0\x00a (esc)
261 p2copies:
261 p2copies:
262 #else
262 #else
263 $ hg ci -m 'copy a to j'
263 $ hg ci -m 'copy a to j'
264 $ hg debugsidedata -c -v -- -1
264 $ hg debugsidedata -c -v -- -1
265 4 sidedata entries
265 4 sidedata entries
266 entry-0010 size 3
266 entry-0010 size 3
267 '0\x00a'
267 '0\x00a'
268 entry-0011 size 0
268 entry-0011 size 0
269 ''
269 ''
270 entry-0012 size 1
270 entry-0012 size 1
271 '0'
271 '0'
272 entry-0013 size 0
272 entry-0013 size 0
273 ''
273 ''
274 #endif
274 #endif
275 $ hg debugdata j 0
275 $ hg debugdata j 0
276 \x01 (esc)
276 \x01 (esc)
277 copy: a
277 copy: a
278 copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
278 copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
279 \x01 (esc)
279 \x01 (esc)
280 a
280 a
281 $ hg showcopies
281 $ hg showcopies
282 a -> j
282 a -> j
283 $ hg showcopies --config experimental.copies.read-from=compatibility
283 $ hg showcopies --config experimental.copies.read-from=compatibility
284 a -> j
284 a -> j
285 $ hg showcopies --config experimental.copies.read-from=filelog-only
285 $ hg showcopies --config experimental.copies.read-from=filelog-only
286 a -> j
286 a -> j
287 Existing copy information in the changeset gets removed on amend and writing
287 Existing copy information in the changeset gets removed on amend and writing
288 copy information on to the filelog
288 copy information on to the filelog
289 #if extra
289 #if extra
290 $ hg ci --amend -m 'copy a to j, v2' \
290 $ hg ci --amend -m 'copy a to j, v2' \
291 > --config experimental.copies.write-to=filelog-only
291 > --config experimental.copies.write-to=filelog-only
292 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-*-amend.hg (glob)
292 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-*-amend.hg (glob)
293 $ hg changesetcopies
293 $ hg changesetcopies
294 files: j
294 files: j
295
295
296 #else
296 #else
297 $ hg ci --amend -m 'copy a to j, v2'
297 $ hg ci --amend -m 'copy a to j, v2'
298 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-*-amend.hg (glob)
298 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-*-amend.hg (glob)
299 $ hg debugsidedata -c -v -- -1
299 $ hg debugsidedata -c -v -- -1
300 4 sidedata entries
300 4 sidedata entries
301 entry-0010 size 3
301 entry-0010 size 3
302 '0\x00a'
302 '0\x00a'
303 entry-0011 size 0
303 entry-0011 size 0
304 ''
304 ''
305 entry-0012 size 1
305 entry-0012 size 1
306 '0'
306 '0'
307 entry-0013 size 0
307 entry-0013 size 0
308 ''
308 ''
309 #endif
309 #endif
310 $ hg showcopies --config experimental.copies.read-from=filelog-only
310 $ hg showcopies --config experimental.copies.read-from=filelog-only
311 a -> j
311 a -> j
312 The entries should be written to extras even if they're empty (so the client
312 The entries should be written to extras even if they're empty (so the client
313 won't have to fall back to reading from filelogs)
313 won't have to fall back to reading from filelogs)
314 $ echo x >> j
314 $ echo x >> j
315 #if extra
315 #if extra
316 $ hg ci -m 'modify j' --config experimental.copies.write-to=compatibility
316 $ hg ci -m 'modify j' --config experimental.copies.write-to=compatibility
317 $ hg changesetcopies
317 $ hg changesetcopies
318 files: j
318 files: j
319 filesadded:
319 filesadded:
320 filesremoved:
320 filesremoved:
321
321
322 p1copies:
322 p1copies:
323 p2copies:
323 p2copies:
324 #else
324 #else
325 $ hg ci -m 'modify j'
325 $ hg ci -m 'modify j'
326 $ hg debugsidedata -c -v -- -1
326 $ hg debugsidedata -c -v -- -1
327 4 sidedata entries
327 4 sidedata entries
328 entry-0010 size 0
328 entry-0010 size 0
329 ''
329 ''
330 entry-0011 size 0
330 entry-0011 size 0
331 ''
331 ''
332 entry-0012 size 0
332 entry-0012 size 0
333 ''
333 ''
334 entry-0013 size 0
334 entry-0013 size 0
335 ''
335 ''
336 #endif
336 #endif
337
337
338 Test writing only to filelog
338 Test writing only to filelog
339
339
340 $ hg cp a k
340 $ hg cp a k
341 #if extra
341 #if extra
342 $ hg ci -m 'copy a to k' --config experimental.copies.write-to=filelog-only
342 $ hg ci -m 'copy a to k' --config experimental.copies.write-to=filelog-only
343
343
344 $ hg changesetcopies
344 $ hg changesetcopies
345 files: k
345 files: k
346
346
347 #else
347 #else
348 $ hg ci -m 'copy a to k'
348 $ hg ci -m 'copy a to k'
349 $ hg debugsidedata -c -v -- -1
349 $ hg debugsidedata -c -v -- -1
350 4 sidedata entries
350 4 sidedata entries
351 entry-0010 size 3
351 entry-0010 size 3
352 '0\x00a'
352 '0\x00a'
353 entry-0011 size 0
353 entry-0011 size 0
354 ''
354 ''
355 entry-0012 size 1
355 entry-0012 size 1
356 '0'
356 '0'
357 entry-0013 size 0
357 entry-0013 size 0
358 ''
358 ''
359 #endif
359 #endif
360
360
361 $ hg debugdata k 0
361 $ hg debugdata k 0
362 \x01 (esc)
362 \x01 (esc)
363 copy: a
363 copy: a
364 copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
364 copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
365 \x01 (esc)
365 \x01 (esc)
366 a
366 a
367 #if extra
367 #if extra
368 $ hg showcopies
368 $ hg showcopies
369
369
370 $ hg showcopies --config experimental.copies.read-from=compatibility
370 $ hg showcopies --config experimental.copies.read-from=compatibility
371 a -> k
371 a -> k
372 $ hg showcopies --config experimental.copies.read-from=filelog-only
372 $ hg showcopies --config experimental.copies.read-from=filelog-only
373 a -> k
373 a -> k
374 #else
374 #else
375 $ hg showcopies
375 $ hg showcopies
376 a -> k
376 a -> k
377 #endif
377 #endif
378
378
379 $ cd ..
379 $ cd ..
380
380
381 Test rebasing a commit with copy information
381 Test rebasing a commit with copy information
382
382
383 $ hg init rebase-rename
383 $ hg init rebase-rename
384 $ cd rebase-rename
384 $ cd rebase-rename
385 $ echo a > a
385 $ echo a > a
386 $ hg ci -Aqm 'add a'
386 $ hg ci -Aqm 'add a'
387 $ echo a2 > a
387 $ echo a2 > a
388 $ hg ci -m 'modify a'
388 $ hg ci -m 'modify a'
389 $ hg co -q 0
389 $ hg co -q 0
390 $ hg mv a b
390 $ hg mv a b
391 $ hg ci -qm 'rename a to b'
391 $ hg ci -qm 'rename a to b'
392 $ hg rebase -d 1 --config rebase.experimental.inmemory=yes
392 $ hg rebase -d 1 --config rebase.experimental.inmemory=yes
393 rebasing 2:* "rename a to b" (tip) (glob)
393 rebasing 2:* "rename a to b" (tip) (glob)
394 merging a and b to b
394 merging a and b to b
395 saved backup bundle to $TESTTMP/rebase-rename/.hg/strip-backup/*-*-rebase.hg (glob)
395 saved backup bundle to $TESTTMP/rebase-rename/.hg/strip-backup/*-*-rebase.hg (glob)
396 $ hg st --change . --copies
396 $ hg st --change . --copies
397 A b
397 A b
398 a
398 a
399 R a
399 R a
400 $ cd ..
400 $ cd ..
401
401
402 Test splitting a commit
402 Test splitting a commit
403
403
404 $ hg init split
404 $ hg init split
405 $ cd split
405 $ cd split
406 $ echo a > a
406 $ echo a > a
407 $ echo b > b
407 $ echo b > b
408 $ hg ci -Aqm 'add a and b'
408 $ hg ci -Aqm 'add a and b'
409 $ echo a2 > a
409 $ echo a2 > a
410 $ hg mv b c
410 $ hg mv b c
411 $ hg ci -m 'modify a, move b to c'
411 $ hg ci -m 'modify a, move b to c'
412 $ hg --config ui.interactive=yes split <<EOF
412 $ hg --config ui.interactive=yes split <<EOF
413 > y
413 > y
414 > y
414 > y
415 > n
415 > n
416 > y
416 > y
417 > EOF
417 > EOF
418 diff --git a/a b/a
418 diff --git a/a b/a
419 1 hunks, 1 lines changed
419 1 hunks, 1 lines changed
420 examine changes to 'a'?
420 examine changes to 'a'?
421 (enter ? for help) [Ynesfdaq?] y
421 (enter ? for help) [Ynesfdaq?] y
422
422
423 @@ -1,1 +1,1 @@
423 @@ -1,1 +1,1 @@
424 -a
424 -a
425 +a2
425 +a2
426 record this change to 'a'?
426 record this change to 'a'?
427 (enter ? for help) [Ynesfdaq?] y
427 (enter ? for help) [Ynesfdaq?] y
428
428
429 diff --git a/b b/c
429 diff --git a/b b/c
430 rename from b
430 rename from b
431 rename to c
431 rename to c
432 examine changes to 'b' and 'c'?
432 examine changes to 'b' and 'c'?
433 (enter ? for help) [Ynesfdaq?] n
433 (enter ? for help) [Ynesfdaq?] n
434
434
435 created new head
435 created new head
436 diff --git a/b b/c
436 diff --git a/b b/c
437 rename from b
437 rename from b
438 rename to c
438 rename to c
439 examine changes to 'b' and 'c'?
439 examine changes to 'b' and 'c'?
440 (enter ? for help) [Ynesfdaq?] y
440 (enter ? for help) [Ynesfdaq?] y
441
441
442 saved backup bundle to $TESTTMP/split/.hg/strip-backup/*-*-split.hg (glob)
442 saved backup bundle to $TESTTMP/split/.hg/strip-backup/*-*-split.hg (glob)
443 $ cd ..
443 $ cd ..
444
444
445 Test committing half a rename
445 Test committing half a rename
446
446
447 $ hg init partial
447 $ hg init partial
448 $ cd partial
448 $ cd partial
449 $ echo a > a
449 $ echo a > a
450 $ hg ci -Aqm 'add a'
450 $ hg ci -Aqm 'add a'
451 $ hg mv a b
451 $ hg mv a b
452 $ hg ci -m 'remove a' a
452 $ hg ci -m 'remove a' a
453
453
454 #if sidedata
454 #if sidedata
455
455
456 Test upgrading/downgrading to sidedata storage
456 Test upgrading/downgrading to sidedata storage
457 ==============================================
457 ==============================================
458
458
459 downgrading (keeping some sidedata)
459 downgrading (keeping some sidedata)
460
460
461 $ hg debugformat -v
461 $ hg debugformat -v
462 format-variant repo config default
462 format-variant repo config default
463 fncache: yes yes yes
463 fncache: yes yes yes
464 dotencode: yes yes yes
464 dotencode: yes yes yes
465 generaldelta: yes yes yes
465 generaldelta: yes yes yes
466 sparserevlog: yes yes yes
466 sparserevlog: yes yes yes
467 sidedata: yes yes no
467 sidedata: yes yes no
468 copies-sdc: yes yes no
468 copies-sdc: yes yes no
469 plain-cl-delta: yes yes yes
469 plain-cl-delta: yes yes yes
470 compression: zlib zlib zlib
470 compression: zlib zlib zlib
471 compression-level: default default default
471 compression-level: default default default
472 $ hg debugsidedata -c -- 0
472 $ hg debugsidedata -c -- 0
473 4 sidedata entries
473 4 sidedata entries
474 entry-0010 size 0
474 entry-0010 size 0
475 entry-0011 size 0
475 entry-0011 size 0
476 entry-0012 size 1
476 entry-0012 size 1
477 entry-0013 size 0
477 entry-0013 size 0
478 $ hg debugsidedata -c -- 1
478 $ hg debugsidedata -c -- 1
479 4 sidedata entries
479 4 sidedata entries
480 entry-0010 size 0
480 entry-0010 size 0
481 entry-0011 size 0
481 entry-0011 size 0
482 entry-0012 size 0
482 entry-0012 size 0
483 entry-0013 size 1
483 entry-0013 size 1
484 $ hg debugsidedata -m -- 0
484 $ hg debugsidedata -m -- 0
485 $ cat << EOF > .hg/hgrc
485 $ cat << EOF > .hg/hgrc
486 > [format]
486 > [format]
487 > use-side-data = yes
487 > exp-use-side-data = yes
488 > exp-use-copies-side-data-changeset = no
488 > exp-use-copies-side-data-changeset = no
489 > EOF
489 > EOF
490 $ hg debugupgraderepo --run --quiet --no-backup > /dev/null
490 $ hg debugupgraderepo --run --quiet --no-backup > /dev/null
491 $ hg debugformat -v
491 $ hg debugformat -v
492 format-variant repo config default
492 format-variant repo config default
493 fncache: yes yes yes
493 fncache: yes yes yes
494 dotencode: yes yes yes
494 dotencode: yes yes yes
495 generaldelta: yes yes yes
495 generaldelta: yes yes yes
496 sparserevlog: yes yes yes
496 sparserevlog: yes yes yes
497 sidedata: yes yes no
497 sidedata: yes yes no
498 copies-sdc: no no no
498 copies-sdc: no no no
499 plain-cl-delta: yes yes yes
499 plain-cl-delta: yes yes yes
500 compression: zlib zlib zlib
500 compression: zlib zlib zlib
501 compression-level: default default default
501 compression-level: default default default
502 $ hg debugsidedata -c -- 0
502 $ hg debugsidedata -c -- 0
503 $ hg debugsidedata -c -- 1
503 $ hg debugsidedata -c -- 1
504 $ hg debugsidedata -m -- 0
504 $ hg debugsidedata -m -- 0
505
505
506 upgrading
506 upgrading
507
507
508 $ cat << EOF > .hg/hgrc
508 $ cat << EOF > .hg/hgrc
509 > [format]
509 > [format]
510 > exp-use-copies-side-data-changeset = yes
510 > exp-use-copies-side-data-changeset = yes
511 > EOF
511 > EOF
512 $ hg debugupgraderepo --run --quiet --no-backup > /dev/null
512 $ hg debugupgraderepo --run --quiet --no-backup > /dev/null
513 $ hg debugformat -v
513 $ hg debugformat -v
514 format-variant repo config default
514 format-variant repo config default
515 fncache: yes yes yes
515 fncache: yes yes yes
516 dotencode: yes yes yes
516 dotencode: yes yes yes
517 generaldelta: yes yes yes
517 generaldelta: yes yes yes
518 sparserevlog: yes yes yes
518 sparserevlog: yes yes yes
519 sidedata: yes yes no
519 sidedata: yes yes no
520 copies-sdc: yes yes no
520 copies-sdc: yes yes no
521 plain-cl-delta: yes yes yes
521 plain-cl-delta: yes yes yes
522 compression: zlib zlib zlib
522 compression: zlib zlib zlib
523 compression-level: default default default
523 compression-level: default default default
524 $ hg debugsidedata -c -- 0
524 $ hg debugsidedata -c -- 0
525 4 sidedata entries
525 4 sidedata entries
526 entry-0010 size 0
526 entry-0010 size 0
527 entry-0011 size 0
527 entry-0011 size 0
528 entry-0012 size 1
528 entry-0012 size 1
529 entry-0013 size 0
529 entry-0013 size 0
530 $ hg debugsidedata -c -- 1
530 $ hg debugsidedata -c -- 1
531 4 sidedata entries
531 4 sidedata entries
532 entry-0010 size 0
532 entry-0010 size 0
533 entry-0011 size 0
533 entry-0011 size 0
534 entry-0012 size 0
534 entry-0012 size 0
535 entry-0013 size 1
535 entry-0013 size 1
536 $ hg debugsidedata -m -- 0
536 $ hg debugsidedata -m -- 0
537
537
538 #endif
538 #endif
539
539
540 $ cd ..
540 $ cd ..
@@ -1,102 +1,102 b''
1 ==========================================================
1 ==========================================================
2 Test file dedicated to checking side-data related behavior
2 Test file dedicated to checking side-data related behavior
3 ==========================================================
3 ==========================================================
4
4
5 Check data can be written/read from sidedata
5 Check data can be written/read from sidedata
6 ============================================
6 ============================================
7
7
8 $ cat << EOF >> $HGRCPATH
8 $ cat << EOF >> $HGRCPATH
9 > [extensions]
9 > [extensions]
10 > testsidedata=$TESTDIR/testlib/ext-sidedata.py
10 > testsidedata=$TESTDIR/testlib/ext-sidedata.py
11 > EOF
11 > EOF
12
12
13 $ hg init test-sidedata --config format.use-side-data=yes
13 $ hg init test-sidedata --config format.exp-use-side-data=yes
14 $ cd test-sidedata
14 $ cd test-sidedata
15 $ echo aaa > a
15 $ echo aaa > a
16 $ hg add a
16 $ hg add a
17 $ hg commit -m a --traceback
17 $ hg commit -m a --traceback
18 $ echo aaa > b
18 $ echo aaa > b
19 $ hg add b
19 $ hg add b
20 $ hg commit -m b
20 $ hg commit -m b
21 $ echo xxx >> a
21 $ echo xxx >> a
22 $ hg commit -m aa
22 $ hg commit -m aa
23
23
24 $ hg debugsidedata -c 0
24 $ hg debugsidedata -c 0
25 2 sidedata entries
25 2 sidedata entries
26 entry-0001 size 4
26 entry-0001 size 4
27 entry-0002 size 32
27 entry-0002 size 32
28 $ hg debugsidedata -c 1 -v
28 $ hg debugsidedata -c 1 -v
29 2 sidedata entries
29 2 sidedata entries
30 entry-0001 size 4
30 entry-0001 size 4
31 '\x00\x00\x006'
31 '\x00\x00\x006'
32 entry-0002 size 32
32 entry-0002 size 32
33 '\x98\t\xf9\xc4v\xf0\xc5P\x90\xf7wRf\xe8\xe27e\xfc\xc1\x93\xa4\x96\xd0\x1d\x97\xaaG\x1d\xd7t\xfa\xde'
33 '\x98\t\xf9\xc4v\xf0\xc5P\x90\xf7wRf\xe8\xe27e\xfc\xc1\x93\xa4\x96\xd0\x1d\x97\xaaG\x1d\xd7t\xfa\xde'
34 $ hg debugsidedata -m 2
34 $ hg debugsidedata -m 2
35 2 sidedata entries
35 2 sidedata entries
36 entry-0001 size 4
36 entry-0001 size 4
37 entry-0002 size 32
37 entry-0002 size 32
38 $ hg debugsidedata a 1
38 $ hg debugsidedata a 1
39 2 sidedata entries
39 2 sidedata entries
40 entry-0001 size 4
40 entry-0001 size 4
41 entry-0002 size 32
41 entry-0002 size 32
42
42
43 Check upgrade behavior
43 Check upgrade behavior
44 ======================
44 ======================
45
45
46 Right now, sidedata has not upgrade support
46 Right now, sidedata has not upgrade support
47
47
48 Check that we can upgrade to sidedata
48 Check that we can upgrade to sidedata
49 -------------------------------------
49 -------------------------------------
50
50
51 $ hg init up-no-side-data --config format.use-side-data=no
51 $ hg init up-no-side-data --config format.exp-use-side-data=no
52 $ hg debugformat -v -R up-no-side-data
52 $ hg debugformat -v -R up-no-side-data
53 format-variant repo config default
53 format-variant repo config default
54 fncache: yes yes yes
54 fncache: yes yes yes
55 dotencode: yes yes yes
55 dotencode: yes yes yes
56 generaldelta: yes yes yes
56 generaldelta: yes yes yes
57 sparserevlog: yes yes yes
57 sparserevlog: yes yes yes
58 sidedata: no no no
58 sidedata: no no no
59 copies-sdc: no no no
59 copies-sdc: no no no
60 plain-cl-delta: yes yes yes
60 plain-cl-delta: yes yes yes
61 compression: zlib zlib zlib
61 compression: zlib zlib zlib
62 compression-level: default default default
62 compression-level: default default default
63 $ hg debugformat -v -R up-no-side-data --config format.use-side-data=yes
63 $ hg debugformat -v -R up-no-side-data --config format.exp-use-side-data=yes
64 format-variant repo config default
64 format-variant repo config default
65 fncache: yes yes yes
65 fncache: yes yes yes
66 dotencode: yes yes yes
66 dotencode: yes yes yes
67 generaldelta: yes yes yes
67 generaldelta: yes yes yes
68 sparserevlog: yes yes yes
68 sparserevlog: yes yes yes
69 sidedata: no yes no
69 sidedata: no yes no
70 copies-sdc: no no no
70 copies-sdc: no no no
71 plain-cl-delta: yes yes yes
71 plain-cl-delta: yes yes yes
72 compression: zlib zlib zlib
72 compression: zlib zlib zlib
73 compression-level: default default default
73 compression-level: default default default
74 $ hg debugupgraderepo -R up-no-side-data --config format.use-side-data=yes > /dev/null
74 $ hg debugupgraderepo -R up-no-side-data --config format.exp-use-side-data=yes > /dev/null
75
75
76 Check that we can downgrade from sidedata
76 Check that we can downgrade from sidedata
77 -----------------------------------------
77 -----------------------------------------
78
78
79 $ hg init up-side-data --config format.use-side-data=yes
79 $ hg init up-side-data --config format.exp-use-side-data=yes
80 $ hg debugformat -v -R up-side-data
80 $ hg debugformat -v -R up-side-data
81 format-variant repo config default
81 format-variant repo config default
82 fncache: yes yes yes
82 fncache: yes yes yes
83 dotencode: yes yes yes
83 dotencode: yes yes yes
84 generaldelta: yes yes yes
84 generaldelta: yes yes yes
85 sparserevlog: yes yes yes
85 sparserevlog: yes yes yes
86 sidedata: yes no no
86 sidedata: yes no no
87 copies-sdc: no no no
87 copies-sdc: no no no
88 plain-cl-delta: yes yes yes
88 plain-cl-delta: yes yes yes
89 compression: zlib zlib zlib
89 compression: zlib zlib zlib
90 compression-level: default default default
90 compression-level: default default default
91 $ hg debugformat -v -R up-side-data --config format.use-side-data=no
91 $ hg debugformat -v -R up-side-data --config format.exp-use-side-data=no
92 format-variant repo config default
92 format-variant repo config default
93 fncache: yes yes yes
93 fncache: yes yes yes
94 dotencode: yes yes yes
94 dotencode: yes yes yes
95 generaldelta: yes yes yes
95 generaldelta: yes yes yes
96 sparserevlog: yes yes yes
96 sparserevlog: yes yes yes
97 sidedata: yes no no
97 sidedata: yes no no
98 copies-sdc: no no no
98 copies-sdc: no no no
99 plain-cl-delta: yes yes yes
99 plain-cl-delta: yes yes yes
100 compression: zlib zlib zlib
100 compression: zlib zlib zlib
101 compression-level: default default default
101 compression-level: default default default
102 $ hg debugupgraderepo -R up-side-data --config format.use-side-data=no > /dev/null
102 $ hg debugupgraderepo -R up-side-data --config format.exp-use-side-data=no > /dev/null
@@ -1,1493 +1,1493 b''
1 #require no-reposimplestore
1 #require no-reposimplestore
2
2
3 $ cat >> $HGRCPATH << EOF
3 $ cat >> $HGRCPATH << EOF
4 > [extensions]
4 > [extensions]
5 > share =
5 > share =
6 > EOF
6 > EOF
7
7
8 store and revlogv1 are required in source
8 store and revlogv1 are required in source
9
9
10 $ hg --config format.usestore=false init no-store
10 $ hg --config format.usestore=false init no-store
11 $ hg -R no-store debugupgraderepo
11 $ hg -R no-store debugupgraderepo
12 abort: cannot upgrade repository; requirement missing: store
12 abort: cannot upgrade repository; requirement missing: store
13 [255]
13 [255]
14
14
15 $ hg init no-revlogv1
15 $ hg init no-revlogv1
16 $ cat > no-revlogv1/.hg/requires << EOF
16 $ cat > no-revlogv1/.hg/requires << EOF
17 > dotencode
17 > dotencode
18 > fncache
18 > fncache
19 > generaldelta
19 > generaldelta
20 > store
20 > store
21 > EOF
21 > EOF
22
22
23 $ hg -R no-revlogv1 debugupgraderepo
23 $ hg -R no-revlogv1 debugupgraderepo
24 abort: cannot upgrade repository; requirement missing: revlogv1
24 abort: cannot upgrade repository; requirement missing: revlogv1
25 [255]
25 [255]
26
26
27 Cannot upgrade shared repositories
27 Cannot upgrade shared repositories
28
28
29 $ hg init share-parent
29 $ hg init share-parent
30 $ hg -q share share-parent share-child
30 $ hg -q share share-parent share-child
31
31
32 $ hg -R share-child debugupgraderepo
32 $ hg -R share-child debugupgraderepo
33 abort: cannot upgrade repository; unsupported source requirement: shared
33 abort: cannot upgrade repository; unsupported source requirement: shared
34 [255]
34 [255]
35
35
36 Do not yet support upgrading treemanifest repos
36 Do not yet support upgrading treemanifest repos
37
37
38 $ hg --config experimental.treemanifest=true init treemanifest
38 $ hg --config experimental.treemanifest=true init treemanifest
39 $ hg -R treemanifest debugupgraderepo
39 $ hg -R treemanifest debugupgraderepo
40 abort: cannot upgrade repository; unsupported source requirement: treemanifest
40 abort: cannot upgrade repository; unsupported source requirement: treemanifest
41 [255]
41 [255]
42
42
43 Cannot add treemanifest requirement during upgrade
43 Cannot add treemanifest requirement during upgrade
44
44
45 $ hg init disallowaddedreq
45 $ hg init disallowaddedreq
46 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
46 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
47 abort: cannot upgrade repository; do not support adding requirement: treemanifest
47 abort: cannot upgrade repository; do not support adding requirement: treemanifest
48 [255]
48 [255]
49
49
50 An upgrade of a repository created with recommended settings only suggests optimizations
50 An upgrade of a repository created with recommended settings only suggests optimizations
51
51
52 $ hg init empty
52 $ hg init empty
53 $ cd empty
53 $ cd empty
54 $ hg debugformat
54 $ hg debugformat
55 format-variant repo
55 format-variant repo
56 fncache: yes
56 fncache: yes
57 dotencode: yes
57 dotencode: yes
58 generaldelta: yes
58 generaldelta: yes
59 sparserevlog: yes
59 sparserevlog: yes
60 sidedata: no
60 sidedata: no
61 copies-sdc: no
61 copies-sdc: no
62 plain-cl-delta: yes
62 plain-cl-delta: yes
63 compression: zlib
63 compression: zlib
64 compression-level: default
64 compression-level: default
65 $ hg debugformat --verbose
65 $ hg debugformat --verbose
66 format-variant repo config default
66 format-variant repo config default
67 fncache: yes yes yes
67 fncache: yes yes yes
68 dotencode: yes yes yes
68 dotencode: yes yes yes
69 generaldelta: yes yes yes
69 generaldelta: yes yes yes
70 sparserevlog: yes yes yes
70 sparserevlog: yes yes yes
71 sidedata: no no no
71 sidedata: no no no
72 copies-sdc: no no no
72 copies-sdc: no no no
73 plain-cl-delta: yes yes yes
73 plain-cl-delta: yes yes yes
74 compression: zlib zlib zlib
74 compression: zlib zlib zlib
75 compression-level: default default default
75 compression-level: default default default
76 $ hg debugformat --verbose --config format.usefncache=no
76 $ hg debugformat --verbose --config format.usefncache=no
77 format-variant repo config default
77 format-variant repo config default
78 fncache: yes no yes
78 fncache: yes no yes
79 dotencode: yes no yes
79 dotencode: yes no yes
80 generaldelta: yes yes yes
80 generaldelta: yes yes yes
81 sparserevlog: yes yes yes
81 sparserevlog: yes yes yes
82 sidedata: no no no
82 sidedata: no no no
83 copies-sdc: no no no
83 copies-sdc: no no no
84 plain-cl-delta: yes yes yes
84 plain-cl-delta: yes yes yes
85 compression: zlib zlib zlib
85 compression: zlib zlib zlib
86 compression-level: default default default
86 compression-level: default default default
87 $ hg debugformat --verbose --config format.usefncache=no --color=debug
87 $ hg debugformat --verbose --config format.usefncache=no --color=debug
88 format-variant repo config default
88 format-variant repo config default
89 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
89 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
90 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
90 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
91 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
91 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
92 [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
92 [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
93 [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
93 [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
94 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
94 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
95 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
95 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
96 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
96 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
97 [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
97 [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
98 $ hg debugformat -Tjson
98 $ hg debugformat -Tjson
99 [
99 [
100 {
100 {
101 "config": true,
101 "config": true,
102 "default": true,
102 "default": true,
103 "name": "fncache",
103 "name": "fncache",
104 "repo": true
104 "repo": true
105 },
105 },
106 {
106 {
107 "config": true,
107 "config": true,
108 "default": true,
108 "default": true,
109 "name": "dotencode",
109 "name": "dotencode",
110 "repo": true
110 "repo": true
111 },
111 },
112 {
112 {
113 "config": true,
113 "config": true,
114 "default": true,
114 "default": true,
115 "name": "generaldelta",
115 "name": "generaldelta",
116 "repo": true
116 "repo": true
117 },
117 },
118 {
118 {
119 "config": true,
119 "config": true,
120 "default": true,
120 "default": true,
121 "name": "sparserevlog",
121 "name": "sparserevlog",
122 "repo": true
122 "repo": true
123 },
123 },
124 {
124 {
125 "config": false,
125 "config": false,
126 "default": false,
126 "default": false,
127 "name": "sidedata",
127 "name": "sidedata",
128 "repo": false
128 "repo": false
129 },
129 },
130 {
130 {
131 "config": false,
131 "config": false,
132 "default": false,
132 "default": false,
133 "name": "copies-sdc",
133 "name": "copies-sdc",
134 "repo": false
134 "repo": false
135 },
135 },
136 {
136 {
137 "config": true,
137 "config": true,
138 "default": true,
138 "default": true,
139 "name": "plain-cl-delta",
139 "name": "plain-cl-delta",
140 "repo": true
140 "repo": true
141 },
141 },
142 {
142 {
143 "config": "zlib",
143 "config": "zlib",
144 "default": "zlib",
144 "default": "zlib",
145 "name": "compression",
145 "name": "compression",
146 "repo": "zlib"
146 "repo": "zlib"
147 },
147 },
148 {
148 {
149 "config": "default",
149 "config": "default",
150 "default": "default",
150 "default": "default",
151 "name": "compression-level",
151 "name": "compression-level",
152 "repo": "default"
152 "repo": "default"
153 }
153 }
154 ]
154 ]
155 $ hg debugupgraderepo
155 $ hg debugupgraderepo
156 (no feature deficiencies found in existing repository)
156 (no feature deficiencies found in existing repository)
157 performing an upgrade with "--run" will make the following changes:
157 performing an upgrade with "--run" will make the following changes:
158
158
159 requirements
159 requirements
160 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
160 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
161
161
162 sidedata
162 sidedata
163 Allows storage of extra data alongside a revision.
163 Allows storage of extra data alongside a revision.
164
164
165 copies-sdc
165 copies-sdc
166 Allows to use more efficient algorithm to deal with copy tracing.
166 Allows to use more efficient algorithm to deal with copy tracing.
167
167
168 additional optimizations are available by specifying "--optimize <name>":
168 additional optimizations are available by specifying "--optimize <name>":
169
169
170 re-delta-parent
170 re-delta-parent
171 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
171 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
172
172
173 re-delta-multibase
173 re-delta-multibase
174 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
174 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
175
175
176 re-delta-all
176 re-delta-all
177 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
177 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
178
178
179 re-delta-fulladd
179 re-delta-fulladd
180 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
180 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
181
181
182
182
183 --optimize can be used to add optimizations
183 --optimize can be used to add optimizations
184
184
185 $ hg debugupgrade --optimize redeltaparent
185 $ hg debugupgrade --optimize redeltaparent
186 (no feature deficiencies found in existing repository)
186 (no feature deficiencies found in existing repository)
187 performing an upgrade with "--run" will make the following changes:
187 performing an upgrade with "--run" will make the following changes:
188
188
189 requirements
189 requirements
190 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
190 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
191
191
192 sidedata
192 sidedata
193 Allows storage of extra data alongside a revision.
193 Allows storage of extra data alongside a revision.
194
194
195 copies-sdc
195 copies-sdc
196 Allows to use more efficient algorithm to deal with copy tracing.
196 Allows to use more efficient algorithm to deal with copy tracing.
197
197
198 re-delta-parent
198 re-delta-parent
199 deltas within internal storage will choose a new base revision if needed
199 deltas within internal storage will choose a new base revision if needed
200
200
201 additional optimizations are available by specifying "--optimize <name>":
201 additional optimizations are available by specifying "--optimize <name>":
202
202
203 re-delta-multibase
203 re-delta-multibase
204 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
204 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
205
205
206 re-delta-all
206 re-delta-all
207 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
207 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
208
208
209 re-delta-fulladd
209 re-delta-fulladd
210 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
210 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
211
211
212
212
213 modern form of the option
213 modern form of the option
214
214
215 $ hg debugupgrade --optimize re-delta-parent
215 $ hg debugupgrade --optimize re-delta-parent
216 (no feature deficiencies found in existing repository)
216 (no feature deficiencies found in existing repository)
217 performing an upgrade with "--run" will make the following changes:
217 performing an upgrade with "--run" will make the following changes:
218
218
219 requirements
219 requirements
220 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
220 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
221
221
222 sidedata
222 sidedata
223 Allows storage of extra data alongside a revision.
223 Allows storage of extra data alongside a revision.
224
224
225 copies-sdc
225 copies-sdc
226 Allows to use more efficient algorithm to deal with copy tracing.
226 Allows to use more efficient algorithm to deal with copy tracing.
227
227
228 re-delta-parent
228 re-delta-parent
229 deltas within internal storage will choose a new base revision if needed
229 deltas within internal storage will choose a new base revision if needed
230
230
231 additional optimizations are available by specifying "--optimize <name>":
231 additional optimizations are available by specifying "--optimize <name>":
232
232
233 re-delta-multibase
233 re-delta-multibase
234 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
234 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
235
235
236 re-delta-all
236 re-delta-all
237 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
237 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
238
238
239 re-delta-fulladd
239 re-delta-fulladd
240 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
240 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
241
241
242
242
243 unknown optimization:
243 unknown optimization:
244
244
245 $ hg debugupgrade --optimize foobar
245 $ hg debugupgrade --optimize foobar
246 abort: unknown optimization action requested: foobar
246 abort: unknown optimization action requested: foobar
247 (run without arguments to see valid optimizations)
247 (run without arguments to see valid optimizations)
248 [255]
248 [255]
249
249
250 Various sub-optimal detections work
250 Various sub-optimal detections work
251
251
252 $ cat > .hg/requires << EOF
252 $ cat > .hg/requires << EOF
253 > revlogv1
253 > revlogv1
254 > store
254 > store
255 > EOF
255 > EOF
256
256
257 $ hg debugformat
257 $ hg debugformat
258 format-variant repo
258 format-variant repo
259 fncache: no
259 fncache: no
260 dotencode: no
260 dotencode: no
261 generaldelta: no
261 generaldelta: no
262 sparserevlog: no
262 sparserevlog: no
263 sidedata: no
263 sidedata: no
264 copies-sdc: no
264 copies-sdc: no
265 plain-cl-delta: yes
265 plain-cl-delta: yes
266 compression: zlib
266 compression: zlib
267 compression-level: default
267 compression-level: default
268 $ hg debugformat --verbose
268 $ hg debugformat --verbose
269 format-variant repo config default
269 format-variant repo config default
270 fncache: no yes yes
270 fncache: no yes yes
271 dotencode: no yes yes
271 dotencode: no yes yes
272 generaldelta: no yes yes
272 generaldelta: no yes yes
273 sparserevlog: no yes yes
273 sparserevlog: no yes yes
274 sidedata: no no no
274 sidedata: no no no
275 copies-sdc: no no no
275 copies-sdc: no no no
276 plain-cl-delta: yes yes yes
276 plain-cl-delta: yes yes yes
277 compression: zlib zlib zlib
277 compression: zlib zlib zlib
278 compression-level: default default default
278 compression-level: default default default
279 $ hg debugformat --verbose --config format.usegeneraldelta=no
279 $ hg debugformat --verbose --config format.usegeneraldelta=no
280 format-variant repo config default
280 format-variant repo config default
281 fncache: no yes yes
281 fncache: no yes yes
282 dotencode: no yes yes
282 dotencode: no yes yes
283 generaldelta: no no yes
283 generaldelta: no no yes
284 sparserevlog: no no yes
284 sparserevlog: no no yes
285 sidedata: no no no
285 sidedata: no no no
286 copies-sdc: no no no
286 copies-sdc: no no no
287 plain-cl-delta: yes yes yes
287 plain-cl-delta: yes yes yes
288 compression: zlib zlib zlib
288 compression: zlib zlib zlib
289 compression-level: default default default
289 compression-level: default default default
290 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
290 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
291 format-variant repo config default
291 format-variant repo config default
292 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
292 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
293 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
293 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
294 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
294 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
295 [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
295 [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
296 [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
296 [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
297 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
297 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
298 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
298 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
299 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
299 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
300 [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
300 [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
301 $ hg debugupgraderepo
301 $ hg debugupgraderepo
302 repository lacks features recommended by current config options:
302 repository lacks features recommended by current config options:
303
303
304 fncache
304 fncache
305 long and reserved filenames may not work correctly; repository performance is sub-optimal
305 long and reserved filenames may not work correctly; repository performance is sub-optimal
306
306
307 dotencode
307 dotencode
308 storage of filenames beginning with a period or space may not work correctly
308 storage of filenames beginning with a period or space may not work correctly
309
309
310 generaldelta
310 generaldelta
311 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
311 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
312
312
313 sparserevlog
313 sparserevlog
314 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
314 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
315
315
316
316
317 performing an upgrade with "--run" will make the following changes:
317 performing an upgrade with "--run" will make the following changes:
318
318
319 requirements
319 requirements
320 preserved: revlogv1, store
320 preserved: revlogv1, store
321 added: dotencode, fncache, generaldelta, sparserevlog
321 added: dotencode, fncache, generaldelta, sparserevlog
322
322
323 fncache
323 fncache
324 repository will be more resilient to storing certain paths and performance of certain operations should be improved
324 repository will be more resilient to storing certain paths and performance of certain operations should be improved
325
325
326 dotencode
326 dotencode
327 repository will be better able to store files beginning with a space or period
327 repository will be better able to store files beginning with a space or period
328
328
329 generaldelta
329 generaldelta
330 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
330 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
331
331
332 sparserevlog
332 sparserevlog
333 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
333 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
334
334
335 sidedata
335 sidedata
336 Allows storage of extra data alongside a revision.
336 Allows storage of extra data alongside a revision.
337
337
338 copies-sdc
338 copies-sdc
339 Allows to use more efficient algorithm to deal with copy tracing.
339 Allows to use more efficient algorithm to deal with copy tracing.
340
340
341 additional optimizations are available by specifying "--optimize <name>":
341 additional optimizations are available by specifying "--optimize <name>":
342
342
343 re-delta-parent
343 re-delta-parent
344 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
344 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
345
345
346 re-delta-multibase
346 re-delta-multibase
347 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
347 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
348
348
349 re-delta-all
349 re-delta-all
350 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
350 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
351
351
352 re-delta-fulladd
352 re-delta-fulladd
353 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
353 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
354
354
355
355
356 $ hg --config format.dotencode=false debugupgraderepo
356 $ hg --config format.dotencode=false debugupgraderepo
357 repository lacks features recommended by current config options:
357 repository lacks features recommended by current config options:
358
358
359 fncache
359 fncache
360 long and reserved filenames may not work correctly; repository performance is sub-optimal
360 long and reserved filenames may not work correctly; repository performance is sub-optimal
361
361
362 generaldelta
362 generaldelta
363 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
363 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
364
364
365 sparserevlog
365 sparserevlog
366 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
366 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
367
367
368 repository lacks features used by the default config options:
368 repository lacks features used by the default config options:
369
369
370 dotencode
370 dotencode
371 storage of filenames beginning with a period or space may not work correctly
371 storage of filenames beginning with a period or space may not work correctly
372
372
373
373
374 performing an upgrade with "--run" will make the following changes:
374 performing an upgrade with "--run" will make the following changes:
375
375
376 requirements
376 requirements
377 preserved: revlogv1, store
377 preserved: revlogv1, store
378 added: fncache, generaldelta, sparserevlog
378 added: fncache, generaldelta, sparserevlog
379
379
380 fncache
380 fncache
381 repository will be more resilient to storing certain paths and performance of certain operations should be improved
381 repository will be more resilient to storing certain paths and performance of certain operations should be improved
382
382
383 generaldelta
383 generaldelta
384 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
384 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
385
385
386 sparserevlog
386 sparserevlog
387 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
387 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
388
388
389 sidedata
389 sidedata
390 Allows storage of extra data alongside a revision.
390 Allows storage of extra data alongside a revision.
391
391
392 copies-sdc
392 copies-sdc
393 Allows to use more efficient algorithm to deal with copy tracing.
393 Allows to use more efficient algorithm to deal with copy tracing.
394
394
395 additional optimizations are available by specifying "--optimize <name>":
395 additional optimizations are available by specifying "--optimize <name>":
396
396
397 re-delta-parent
397 re-delta-parent
398 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
398 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
399
399
400 re-delta-multibase
400 re-delta-multibase
401 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
401 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
402
402
403 re-delta-all
403 re-delta-all
404 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
404 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
405
405
406 re-delta-fulladd
406 re-delta-fulladd
407 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
407 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
408
408
409
409
410 $ cd ..
410 $ cd ..
411
411
412 Upgrading a repository that is already modern essentially no-ops
412 Upgrading a repository that is already modern essentially no-ops
413
413
414 $ hg init modern
414 $ hg init modern
415 $ hg -R modern debugupgraderepo --run
415 $ hg -R modern debugupgraderepo --run
416 upgrade will perform the following actions:
416 upgrade will perform the following actions:
417
417
418 requirements
418 requirements
419 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
419 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
420
420
421 sidedata
421 sidedata
422 Allows storage of extra data alongside a revision.
422 Allows storage of extra data alongside a revision.
423
423
424 copies-sdc
424 copies-sdc
425 Allows to use more efficient algorithm to deal with copy tracing.
425 Allows to use more efficient algorithm to deal with copy tracing.
426
426
427 beginning upgrade...
427 beginning upgrade...
428 repository locked and read-only
428 repository locked and read-only
429 creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
429 creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
430 (it is safe to interrupt this process any time before data migration completes)
430 (it is safe to interrupt this process any time before data migration completes)
431 data fully migrated to temporary repository
431 data fully migrated to temporary repository
432 marking source repository as being upgraded; clients will be unable to read from repository
432 marking source repository as being upgraded; clients will be unable to read from repository
433 starting in-place swap of repository data
433 starting in-place swap of repository data
434 replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
434 replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
435 replacing store...
435 replacing store...
436 store replacement complete; repository was inconsistent for *s (glob)
436 store replacement complete; repository was inconsistent for *s (glob)
437 finalizing requirements file and making repository readable again
437 finalizing requirements file and making repository readable again
438 removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
438 removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
439 copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
439 copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
440 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
440 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
441
441
442 Upgrading a repository to generaldelta works
442 Upgrading a repository to generaldelta works
443
443
444 $ hg --config format.usegeneraldelta=false init upgradegd
444 $ hg --config format.usegeneraldelta=false init upgradegd
445 $ cd upgradegd
445 $ cd upgradegd
446 $ touch f0
446 $ touch f0
447 $ hg -q commit -A -m initial
447 $ hg -q commit -A -m initial
448 $ mkdir FooBarDirectory.d
448 $ mkdir FooBarDirectory.d
449 $ touch FooBarDirectory.d/f1
449 $ touch FooBarDirectory.d/f1
450 $ hg -q commit -A -m 'add f1'
450 $ hg -q commit -A -m 'add f1'
451 $ hg -q up -r 0
451 $ hg -q up -r 0
452 >>> from __future__ import absolute_import, print_function
452 >>> from __future__ import absolute_import, print_function
453 >>> import random
453 >>> import random
454 >>> random.seed(0) # have a reproducible content
454 >>> random.seed(0) # have a reproducible content
455 >>> with open("f2", "w") as f:
455 >>> with open("f2", "w") as f:
456 ... for i in range(100000):
456 ... for i in range(100000):
457 ... f.write("%d\n" % random.randint(1000000000, 9999999999)) and None
457 ... f.write("%d\n" % random.randint(1000000000, 9999999999)) and None
458 $ hg -q commit -A -m 'add f2'
458 $ hg -q commit -A -m 'add f2'
459
459
460 make sure we have a .d file
460 make sure we have a .d file
461
461
462 $ ls -d .hg/store/data/*
462 $ ls -d .hg/store/data/*
463 .hg/store/data/_foo_bar_directory.d.hg
463 .hg/store/data/_foo_bar_directory.d.hg
464 .hg/store/data/f0.i
464 .hg/store/data/f0.i
465 .hg/store/data/f2.d
465 .hg/store/data/f2.d
466 .hg/store/data/f2.i
466 .hg/store/data/f2.i
467
467
468 $ hg debugupgraderepo --run --config format.sparse-revlog=false
468 $ hg debugupgraderepo --run --config format.sparse-revlog=false
469 upgrade will perform the following actions:
469 upgrade will perform the following actions:
470
470
471 requirements
471 requirements
472 preserved: dotencode, fncache, revlogv1, store
472 preserved: dotencode, fncache, revlogv1, store
473 added: generaldelta
473 added: generaldelta
474
474
475 generaldelta
475 generaldelta
476 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
476 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
477
477
478 sidedata
478 sidedata
479 Allows storage of extra data alongside a revision.
479 Allows storage of extra data alongside a revision.
480
480
481 copies-sdc
481 copies-sdc
482 Allows to use more efficient algorithm to deal with copy tracing.
482 Allows to use more efficient algorithm to deal with copy tracing.
483
483
484 beginning upgrade...
484 beginning upgrade...
485 repository locked and read-only
485 repository locked and read-only
486 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
486 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
487 (it is safe to interrupt this process any time before data migration completes)
487 (it is safe to interrupt this process any time before data migration completes)
488 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
488 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
489 migrating 519 KB in store; 1.05 MB tracked data
489 migrating 519 KB in store; 1.05 MB tracked data
490 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
490 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
491 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
491 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
492 migrating 1 manifests containing 3 revisions (384 bytes in store; 238 bytes tracked data)
492 migrating 1 manifests containing 3 revisions (384 bytes in store; 238 bytes tracked data)
493 finished migrating 3 manifest revisions across 1 manifests; change in size: -17 bytes
493 finished migrating 3 manifest revisions across 1 manifests; change in size: -17 bytes
494 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
494 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
495 finished migrating 3 changelog revisions; change in size: 0 bytes
495 finished migrating 3 changelog revisions; change in size: 0 bytes
496 finished migrating 9 total revisions; total change in store size: -17 bytes
496 finished migrating 9 total revisions; total change in store size: -17 bytes
497 copying phaseroots
497 copying phaseroots
498 data fully migrated to temporary repository
498 data fully migrated to temporary repository
499 marking source repository as being upgraded; clients will be unable to read from repository
499 marking source repository as being upgraded; clients will be unable to read from repository
500 starting in-place swap of repository data
500 starting in-place swap of repository data
501 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
501 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
502 replacing store...
502 replacing store...
503 store replacement complete; repository was inconsistent for *s (glob)
503 store replacement complete; repository was inconsistent for *s (glob)
504 finalizing requirements file and making repository readable again
504 finalizing requirements file and making repository readable again
505 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
505 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
506 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
506 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
507 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
507 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
508
508
509 Original requirements backed up
509 Original requirements backed up
510
510
511 $ cat .hg/upgradebackup.*/requires
511 $ cat .hg/upgradebackup.*/requires
512 dotencode
512 dotencode
513 fncache
513 fncache
514 revlogv1
514 revlogv1
515 store
515 store
516
516
517 generaldelta added to original requirements files
517 generaldelta added to original requirements files
518
518
519 $ cat .hg/requires
519 $ cat .hg/requires
520 dotencode
520 dotencode
521 fncache
521 fncache
522 generaldelta
522 generaldelta
523 revlogv1
523 revlogv1
524 store
524 store
525
525
526 store directory has files we expect
526 store directory has files we expect
527
527
528 $ ls .hg/store
528 $ ls .hg/store
529 00changelog.i
529 00changelog.i
530 00manifest.i
530 00manifest.i
531 data
531 data
532 fncache
532 fncache
533 phaseroots
533 phaseroots
534 undo
534 undo
535 undo.backupfiles
535 undo.backupfiles
536 undo.phaseroots
536 undo.phaseroots
537
537
538 manifest should be generaldelta
538 manifest should be generaldelta
539
539
540 $ hg debugrevlog -m | grep flags
540 $ hg debugrevlog -m | grep flags
541 flags : inline, generaldelta
541 flags : inline, generaldelta
542
542
543 verify should be happy
543 verify should be happy
544
544
545 $ hg verify
545 $ hg verify
546 checking changesets
546 checking changesets
547 checking manifests
547 checking manifests
548 crosschecking files in changesets and manifests
548 crosschecking files in changesets and manifests
549 checking files
549 checking files
550 checked 3 changesets with 3 changes to 3 files
550 checked 3 changesets with 3 changes to 3 files
551
551
552 old store should be backed up
552 old store should be backed up
553
553
554 $ ls -d .hg/upgradebackup.*/
554 $ ls -d .hg/upgradebackup.*/
555 .hg/upgradebackup.*/ (glob)
555 .hg/upgradebackup.*/ (glob)
556 $ ls .hg/upgradebackup.*/store
556 $ ls .hg/upgradebackup.*/store
557 00changelog.i
557 00changelog.i
558 00manifest.i
558 00manifest.i
559 data
559 data
560 fncache
560 fncache
561 phaseroots
561 phaseroots
562 undo
562 undo
563 undo.backup.fncache
563 undo.backup.fncache
564 undo.backupfiles
564 undo.backupfiles
565 undo.phaseroots
565 undo.phaseroots
566
566
567 unless --no-backup is passed
567 unless --no-backup is passed
568
568
569 $ rm -rf .hg/upgradebackup.*/
569 $ rm -rf .hg/upgradebackup.*/
570 $ hg debugupgraderepo --run --no-backup
570 $ hg debugupgraderepo --run --no-backup
571 upgrade will perform the following actions:
571 upgrade will perform the following actions:
572
572
573 requirements
573 requirements
574 preserved: dotencode, fncache, generaldelta, revlogv1, store
574 preserved: dotencode, fncache, generaldelta, revlogv1, store
575 added: sparserevlog
575 added: sparserevlog
576
576
577 sparserevlog
577 sparserevlog
578 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
578 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
579
579
580 sidedata
580 sidedata
581 Allows storage of extra data alongside a revision.
581 Allows storage of extra data alongside a revision.
582
582
583 copies-sdc
583 copies-sdc
584 Allows to use more efficient algorithm to deal with copy tracing.
584 Allows to use more efficient algorithm to deal with copy tracing.
585
585
586 beginning upgrade...
586 beginning upgrade...
587 repository locked and read-only
587 repository locked and read-only
588 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
588 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
589 (it is safe to interrupt this process any time before data migration completes)
589 (it is safe to interrupt this process any time before data migration completes)
590 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
590 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
591 migrating 519 KB in store; 1.05 MB tracked data
591 migrating 519 KB in store; 1.05 MB tracked data
592 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
592 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
593 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
593 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
594 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
594 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
595 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
595 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
596 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
596 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
597 finished migrating 3 changelog revisions; change in size: 0 bytes
597 finished migrating 3 changelog revisions; change in size: 0 bytes
598 finished migrating 9 total revisions; total change in store size: 0 bytes
598 finished migrating 9 total revisions; total change in store size: 0 bytes
599 copying phaseroots
599 copying phaseroots
600 data fully migrated to temporary repository
600 data fully migrated to temporary repository
601 marking source repository as being upgraded; clients will be unable to read from repository
601 marking source repository as being upgraded; clients will be unable to read from repository
602 starting in-place swap of repository data
602 starting in-place swap of repository data
603 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
603 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
604 replacing store...
604 replacing store...
605 store replacement complete; repository was inconsistent for * (glob)
605 store replacement complete; repository was inconsistent for * (glob)
606 finalizing requirements file and making repository readable again
606 finalizing requirements file and making repository readable again
607 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
607 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
608 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
608 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
609 $ ls -1 .hg/ | grep upgradebackup
609 $ ls -1 .hg/ | grep upgradebackup
610 [1]
610 [1]
611
611
612 We can restrict optimization to some revlog:
612 We can restrict optimization to some revlog:
613
613
614 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
614 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
615 upgrade will perform the following actions:
615 upgrade will perform the following actions:
616
616
617 requirements
617 requirements
618 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
618 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
619
619
620 sidedata
620 sidedata
621 Allows storage of extra data alongside a revision.
621 Allows storage of extra data alongside a revision.
622
622
623 copies-sdc
623 copies-sdc
624 Allows to use more efficient algorithm to deal with copy tracing.
624 Allows to use more efficient algorithm to deal with copy tracing.
625
625
626 re-delta-parent
626 re-delta-parent
627 deltas within internal storage will choose a new base revision if needed
627 deltas within internal storage will choose a new base revision if needed
628
628
629 beginning upgrade...
629 beginning upgrade...
630 repository locked and read-only
630 repository locked and read-only
631 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
631 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
632 (it is safe to interrupt this process any time before data migration completes)
632 (it is safe to interrupt this process any time before data migration completes)
633 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
633 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
634 migrating 519 KB in store; 1.05 MB tracked data
634 migrating 519 KB in store; 1.05 MB tracked data
635 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
635 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
636 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
636 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
637 blindly copying data/f0.i containing 1 revisions
637 blindly copying data/f0.i containing 1 revisions
638 blindly copying data/f2.i containing 1 revisions
638 blindly copying data/f2.i containing 1 revisions
639 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
639 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
640 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
640 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
641 cloning 3 revisions from 00manifest.i
641 cloning 3 revisions from 00manifest.i
642 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
642 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
643 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
643 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
644 blindly copying 00changelog.i containing 3 revisions
644 blindly copying 00changelog.i containing 3 revisions
645 finished migrating 3 changelog revisions; change in size: 0 bytes
645 finished migrating 3 changelog revisions; change in size: 0 bytes
646 finished migrating 9 total revisions; total change in store size: 0 bytes
646 finished migrating 9 total revisions; total change in store size: 0 bytes
647 copying phaseroots
647 copying phaseroots
648 data fully migrated to temporary repository
648 data fully migrated to temporary repository
649 marking source repository as being upgraded; clients will be unable to read from repository
649 marking source repository as being upgraded; clients will be unable to read from repository
650 starting in-place swap of repository data
650 starting in-place swap of repository data
651 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
651 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
652 replacing store...
652 replacing store...
653 store replacement complete; repository was inconsistent for *s (glob)
653 store replacement complete; repository was inconsistent for *s (glob)
654 finalizing requirements file and making repository readable again
654 finalizing requirements file and making repository readable again
655 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
655 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
656 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
656 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
657
657
658 Check that the repo still works fine
658 Check that the repo still works fine
659
659
660 $ hg log -G --stat
660 $ hg log -G --stat
661 @ changeset: 2:76d4395f5413 (no-py3 !)
661 @ changeset: 2:76d4395f5413 (no-py3 !)
662 @ changeset: 2:fca376863211 (py3 !)
662 @ changeset: 2:fca376863211 (py3 !)
663 | tag: tip
663 | tag: tip
664 | parent: 0:ba592bf28da2
664 | parent: 0:ba592bf28da2
665 | user: test
665 | user: test
666 | date: Thu Jan 01 00:00:00 1970 +0000
666 | date: Thu Jan 01 00:00:00 1970 +0000
667 | summary: add f2
667 | summary: add f2
668 |
668 |
669 | f2 | 100000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
669 | f2 | 100000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
670 | 1 files changed, 100000 insertions(+), 0 deletions(-)
670 | 1 files changed, 100000 insertions(+), 0 deletions(-)
671 |
671 |
672 | o changeset: 1:2029ce2354e2
672 | o changeset: 1:2029ce2354e2
673 |/ user: test
673 |/ user: test
674 | date: Thu Jan 01 00:00:00 1970 +0000
674 | date: Thu Jan 01 00:00:00 1970 +0000
675 | summary: add f1
675 | summary: add f1
676 |
676 |
677 |
677 |
678 o changeset: 0:ba592bf28da2
678 o changeset: 0:ba592bf28da2
679 user: test
679 user: test
680 date: Thu Jan 01 00:00:00 1970 +0000
680 date: Thu Jan 01 00:00:00 1970 +0000
681 summary: initial
681 summary: initial
682
682
683
683
684
684
685 $ hg verify
685 $ hg verify
686 checking changesets
686 checking changesets
687 checking manifests
687 checking manifests
688 crosschecking files in changesets and manifests
688 crosschecking files in changesets and manifests
689 checking files
689 checking files
690 checked 3 changesets with 3 changes to 3 files
690 checked 3 changesets with 3 changes to 3 files
691
691
692 Check we can select negatively
692 Check we can select negatively
693
693
694 $ hg debugupgrade --optimize re-delta-parent --run --no-manifest --no-backup --debug --traceback
694 $ hg debugupgrade --optimize re-delta-parent --run --no-manifest --no-backup --debug --traceback
695 upgrade will perform the following actions:
695 upgrade will perform the following actions:
696
696
697 requirements
697 requirements
698 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
698 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
699
699
700 sidedata
700 sidedata
701 Allows storage of extra data alongside a revision.
701 Allows storage of extra data alongside a revision.
702
702
703 copies-sdc
703 copies-sdc
704 Allows to use more efficient algorithm to deal with copy tracing.
704 Allows to use more efficient algorithm to deal with copy tracing.
705
705
706 re-delta-parent
706 re-delta-parent
707 deltas within internal storage will choose a new base revision if needed
707 deltas within internal storage will choose a new base revision if needed
708
708
709 beginning upgrade...
709 beginning upgrade...
710 repository locked and read-only
710 repository locked and read-only
711 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
711 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
712 (it is safe to interrupt this process any time before data migration completes)
712 (it is safe to interrupt this process any time before data migration completes)
713 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
713 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
714 migrating 519 KB in store; 1.05 MB tracked data
714 migrating 519 KB in store; 1.05 MB tracked data
715 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
715 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
716 cloning 1 revisions from data/FooBarDirectory.d/f1.i
716 cloning 1 revisions from data/FooBarDirectory.d/f1.i
717 cloning 1 revisions from data/f0.i
717 cloning 1 revisions from data/f0.i
718 cloning 1 revisions from data/f2.i
718 cloning 1 revisions from data/f2.i
719 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
719 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
720 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
720 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
721 blindly copying 00manifest.i containing 3 revisions
721 blindly copying 00manifest.i containing 3 revisions
722 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
722 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
723 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
723 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
724 cloning 3 revisions from 00changelog.i
724 cloning 3 revisions from 00changelog.i
725 finished migrating 3 changelog revisions; change in size: 0 bytes
725 finished migrating 3 changelog revisions; change in size: 0 bytes
726 finished migrating 9 total revisions; total change in store size: 0 bytes
726 finished migrating 9 total revisions; total change in store size: 0 bytes
727 copying phaseroots
727 copying phaseroots
728 data fully migrated to temporary repository
728 data fully migrated to temporary repository
729 marking source repository as being upgraded; clients will be unable to read from repository
729 marking source repository as being upgraded; clients will be unable to read from repository
730 starting in-place swap of repository data
730 starting in-place swap of repository data
731 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
731 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
732 replacing store...
732 replacing store...
733 store replacement complete; repository was inconsistent for *s (glob)
733 store replacement complete; repository was inconsistent for *s (glob)
734 finalizing requirements file and making repository readable again
734 finalizing requirements file and making repository readable again
735 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
735 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
736 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
736 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
737 $ hg verify
737 $ hg verify
738 checking changesets
738 checking changesets
739 checking manifests
739 checking manifests
740 crosschecking files in changesets and manifests
740 crosschecking files in changesets and manifests
741 checking files
741 checking files
742 checked 3 changesets with 3 changes to 3 files
742 checked 3 changesets with 3 changes to 3 files
743
743
744 Check that we can select changelog only
744 Check that we can select changelog only
745
745
746 $ hg debugupgrade --optimize re-delta-parent --run --changelog --no-backup --debug --traceback
746 $ hg debugupgrade --optimize re-delta-parent --run --changelog --no-backup --debug --traceback
747 upgrade will perform the following actions:
747 upgrade will perform the following actions:
748
748
749 requirements
749 requirements
750 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
750 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
751
751
752 sidedata
752 sidedata
753 Allows storage of extra data alongside a revision.
753 Allows storage of extra data alongside a revision.
754
754
755 copies-sdc
755 copies-sdc
756 Allows to use more efficient algorithm to deal with copy tracing.
756 Allows to use more efficient algorithm to deal with copy tracing.
757
757
758 re-delta-parent
758 re-delta-parent
759 deltas within internal storage will choose a new base revision if needed
759 deltas within internal storage will choose a new base revision if needed
760
760
761 beginning upgrade...
761 beginning upgrade...
762 repository locked and read-only
762 repository locked and read-only
763 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
763 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
764 (it is safe to interrupt this process any time before data migration completes)
764 (it is safe to interrupt this process any time before data migration completes)
765 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
765 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
766 migrating 519 KB in store; 1.05 MB tracked data
766 migrating 519 KB in store; 1.05 MB tracked data
767 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
767 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
768 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
768 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
769 blindly copying data/f0.i containing 1 revisions
769 blindly copying data/f0.i containing 1 revisions
770 blindly copying data/f2.i containing 1 revisions
770 blindly copying data/f2.i containing 1 revisions
771 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
771 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
772 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
772 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
773 blindly copying 00manifest.i containing 3 revisions
773 blindly copying 00manifest.i containing 3 revisions
774 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
774 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
775 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
775 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
776 cloning 3 revisions from 00changelog.i
776 cloning 3 revisions from 00changelog.i
777 finished migrating 3 changelog revisions; change in size: 0 bytes
777 finished migrating 3 changelog revisions; change in size: 0 bytes
778 finished migrating 9 total revisions; total change in store size: 0 bytes
778 finished migrating 9 total revisions; total change in store size: 0 bytes
779 copying phaseroots
779 copying phaseroots
780 data fully migrated to temporary repository
780 data fully migrated to temporary repository
781 marking source repository as being upgraded; clients will be unable to read from repository
781 marking source repository as being upgraded; clients will be unable to read from repository
782 starting in-place swap of repository data
782 starting in-place swap of repository data
783 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
783 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
784 replacing store...
784 replacing store...
785 store replacement complete; repository was inconsistent for *s (glob)
785 store replacement complete; repository was inconsistent for *s (glob)
786 finalizing requirements file and making repository readable again
786 finalizing requirements file and making repository readable again
787 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
787 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
788 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
788 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
789 $ hg verify
789 $ hg verify
790 checking changesets
790 checking changesets
791 checking manifests
791 checking manifests
792 crosschecking files in changesets and manifests
792 crosschecking files in changesets and manifests
793 checking files
793 checking files
794 checked 3 changesets with 3 changes to 3 files
794 checked 3 changesets with 3 changes to 3 files
795
795
796 Check that we can select filelog only
796 Check that we can select filelog only
797
797
798 $ hg debugupgrade --optimize re-delta-parent --run --no-changelog --no-manifest --no-backup --debug --traceback
798 $ hg debugupgrade --optimize re-delta-parent --run --no-changelog --no-manifest --no-backup --debug --traceback
799 upgrade will perform the following actions:
799 upgrade will perform the following actions:
800
800
801 requirements
801 requirements
802 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
802 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
803
803
804 sidedata
804 sidedata
805 Allows storage of extra data alongside a revision.
805 Allows storage of extra data alongside a revision.
806
806
807 copies-sdc
807 copies-sdc
808 Allows to use more efficient algorithm to deal with copy tracing.
808 Allows to use more efficient algorithm to deal with copy tracing.
809
809
810 re-delta-parent
810 re-delta-parent
811 deltas within internal storage will choose a new base revision if needed
811 deltas within internal storage will choose a new base revision if needed
812
812
813 beginning upgrade...
813 beginning upgrade...
814 repository locked and read-only
814 repository locked and read-only
815 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
815 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
816 (it is safe to interrupt this process any time before data migration completes)
816 (it is safe to interrupt this process any time before data migration completes)
817 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
817 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
818 migrating 519 KB in store; 1.05 MB tracked data
818 migrating 519 KB in store; 1.05 MB tracked data
819 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
819 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
820 cloning 1 revisions from data/FooBarDirectory.d/f1.i
820 cloning 1 revisions from data/FooBarDirectory.d/f1.i
821 cloning 1 revisions from data/f0.i
821 cloning 1 revisions from data/f0.i
822 cloning 1 revisions from data/f2.i
822 cloning 1 revisions from data/f2.i
823 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
823 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
824 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
824 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
825 blindly copying 00manifest.i containing 3 revisions
825 blindly copying 00manifest.i containing 3 revisions
826 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
826 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
827 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
827 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
828 blindly copying 00changelog.i containing 3 revisions
828 blindly copying 00changelog.i containing 3 revisions
829 finished migrating 3 changelog revisions; change in size: 0 bytes
829 finished migrating 3 changelog revisions; change in size: 0 bytes
830 finished migrating 9 total revisions; total change in store size: 0 bytes
830 finished migrating 9 total revisions; total change in store size: 0 bytes
831 copying phaseroots
831 copying phaseroots
832 data fully migrated to temporary repository
832 data fully migrated to temporary repository
833 marking source repository as being upgraded; clients will be unable to read from repository
833 marking source repository as being upgraded; clients will be unable to read from repository
834 starting in-place swap of repository data
834 starting in-place swap of repository data
835 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
835 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
836 replacing store...
836 replacing store...
837 store replacement complete; repository was inconsistent for *s (glob)
837 store replacement complete; repository was inconsistent for *s (glob)
838 finalizing requirements file and making repository readable again
838 finalizing requirements file and making repository readable again
839 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
839 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
840 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
840 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
841 $ hg verify
841 $ hg verify
842 checking changesets
842 checking changesets
843 checking manifests
843 checking manifests
844 crosschecking files in changesets and manifests
844 crosschecking files in changesets and manifests
845 checking files
845 checking files
846 checked 3 changesets with 3 changes to 3 files
846 checked 3 changesets with 3 changes to 3 files
847
847
848
848
849 Check you can't skip revlog clone during important format downgrade
849 Check you can't skip revlog clone during important format downgrade
850
850
851 $ echo "[format]" > .hg/hgrc
851 $ echo "[format]" > .hg/hgrc
852 $ echo "sparse-revlog=no" >> .hg/hgrc
852 $ echo "sparse-revlog=no" >> .hg/hgrc
853 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
853 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
854 ignoring revlogs selection flags, format requirements change: sparserevlog
854 ignoring revlogs selection flags, format requirements change: sparserevlog
855 upgrade will perform the following actions:
855 upgrade will perform the following actions:
856
856
857 requirements
857 requirements
858 preserved: dotencode, fncache, generaldelta, revlogv1, store
858 preserved: dotencode, fncache, generaldelta, revlogv1, store
859 removed: sparserevlog
859 removed: sparserevlog
860
860
861 sidedata
861 sidedata
862 Allows storage of extra data alongside a revision.
862 Allows storage of extra data alongside a revision.
863
863
864 copies-sdc
864 copies-sdc
865 Allows to use more efficient algorithm to deal with copy tracing.
865 Allows to use more efficient algorithm to deal with copy tracing.
866
866
867 re-delta-parent
867 re-delta-parent
868 deltas within internal storage will choose a new base revision if needed
868 deltas within internal storage will choose a new base revision if needed
869
869
870 beginning upgrade...
870 beginning upgrade...
871 repository locked and read-only
871 repository locked and read-only
872 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
872 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
873 (it is safe to interrupt this process any time before data migration completes)
873 (it is safe to interrupt this process any time before data migration completes)
874 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
874 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
875 migrating 519 KB in store; 1.05 MB tracked data
875 migrating 519 KB in store; 1.05 MB tracked data
876 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
876 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
877 cloning 1 revisions from data/FooBarDirectory.d/f1.i
877 cloning 1 revisions from data/FooBarDirectory.d/f1.i
878 cloning 1 revisions from data/f0.i
878 cloning 1 revisions from data/f0.i
879 cloning 1 revisions from data/f2.i
879 cloning 1 revisions from data/f2.i
880 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
880 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
881 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
881 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
882 cloning 3 revisions from 00manifest.i
882 cloning 3 revisions from 00manifest.i
883 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
883 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
884 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
884 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
885 cloning 3 revisions from 00changelog.i
885 cloning 3 revisions from 00changelog.i
886 finished migrating 3 changelog revisions; change in size: 0 bytes
886 finished migrating 3 changelog revisions; change in size: 0 bytes
887 finished migrating 9 total revisions; total change in store size: 0 bytes
887 finished migrating 9 total revisions; total change in store size: 0 bytes
888 copying phaseroots
888 copying phaseroots
889 data fully migrated to temporary repository
889 data fully migrated to temporary repository
890 marking source repository as being upgraded; clients will be unable to read from repository
890 marking source repository as being upgraded; clients will be unable to read from repository
891 starting in-place swap of repository data
891 starting in-place swap of repository data
892 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
892 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
893 replacing store...
893 replacing store...
894 store replacement complete; repository was inconsistent for *s (glob)
894 store replacement complete; repository was inconsistent for *s (glob)
895 finalizing requirements file and making repository readable again
895 finalizing requirements file and making repository readable again
896 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
896 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
897 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
897 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
898 $ hg verify
898 $ hg verify
899 checking changesets
899 checking changesets
900 checking manifests
900 checking manifests
901 crosschecking files in changesets and manifests
901 crosschecking files in changesets and manifests
902 checking files
902 checking files
903 checked 3 changesets with 3 changes to 3 files
903 checked 3 changesets with 3 changes to 3 files
904
904
905 Check you can't skip revlog clone during important format upgrade
905 Check you can't skip revlog clone during important format upgrade
906
906
907 $ echo "sparse-revlog=yes" >> .hg/hgrc
907 $ echo "sparse-revlog=yes" >> .hg/hgrc
908 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
908 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
909 ignoring revlogs selection flags, format requirements change: sparserevlog
909 ignoring revlogs selection flags, format requirements change: sparserevlog
910 upgrade will perform the following actions:
910 upgrade will perform the following actions:
911
911
912 requirements
912 requirements
913 preserved: dotencode, fncache, generaldelta, revlogv1, store
913 preserved: dotencode, fncache, generaldelta, revlogv1, store
914 added: sparserevlog
914 added: sparserevlog
915
915
916 sparserevlog
916 sparserevlog
917 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
917 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
918
918
919 sidedata
919 sidedata
920 Allows storage of extra data alongside a revision.
920 Allows storage of extra data alongside a revision.
921
921
922 copies-sdc
922 copies-sdc
923 Allows to use more efficient algorithm to deal with copy tracing.
923 Allows to use more efficient algorithm to deal with copy tracing.
924
924
925 re-delta-parent
925 re-delta-parent
926 deltas within internal storage will choose a new base revision if needed
926 deltas within internal storage will choose a new base revision if needed
927
927
928 beginning upgrade...
928 beginning upgrade...
929 repository locked and read-only
929 repository locked and read-only
930 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
930 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
931 (it is safe to interrupt this process any time before data migration completes)
931 (it is safe to interrupt this process any time before data migration completes)
932 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
932 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
933 migrating 519 KB in store; 1.05 MB tracked data
933 migrating 519 KB in store; 1.05 MB tracked data
934 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
934 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
935 cloning 1 revisions from data/FooBarDirectory.d/f1.i
935 cloning 1 revisions from data/FooBarDirectory.d/f1.i
936 cloning 1 revisions from data/f0.i
936 cloning 1 revisions from data/f0.i
937 cloning 1 revisions from data/f2.i
937 cloning 1 revisions from data/f2.i
938 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
938 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
939 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
939 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
940 cloning 3 revisions from 00manifest.i
940 cloning 3 revisions from 00manifest.i
941 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
941 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
942 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
942 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
943 cloning 3 revisions from 00changelog.i
943 cloning 3 revisions from 00changelog.i
944 finished migrating 3 changelog revisions; change in size: 0 bytes
944 finished migrating 3 changelog revisions; change in size: 0 bytes
945 finished migrating 9 total revisions; total change in store size: 0 bytes
945 finished migrating 9 total revisions; total change in store size: 0 bytes
946 copying phaseroots
946 copying phaseroots
947 data fully migrated to temporary repository
947 data fully migrated to temporary repository
948 marking source repository as being upgraded; clients will be unable to read from repository
948 marking source repository as being upgraded; clients will be unable to read from repository
949 starting in-place swap of repository data
949 starting in-place swap of repository data
950 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
950 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
951 replacing store...
951 replacing store...
952 store replacement complete; repository was inconsistent for *s (glob)
952 store replacement complete; repository was inconsistent for *s (glob)
953 finalizing requirements file and making repository readable again
953 finalizing requirements file and making repository readable again
954 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
954 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
955 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
955 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
956 $ hg verify
956 $ hg verify
957 checking changesets
957 checking changesets
958 checking manifests
958 checking manifests
959 crosschecking files in changesets and manifests
959 crosschecking files in changesets and manifests
960 checking files
960 checking files
961 checked 3 changesets with 3 changes to 3 files
961 checked 3 changesets with 3 changes to 3 files
962
962
963 $ cd ..
963 $ cd ..
964
964
965 store files with special filenames aren't encoded during copy
965 store files with special filenames aren't encoded during copy
966
966
967 $ hg init store-filenames
967 $ hg init store-filenames
968 $ cd store-filenames
968 $ cd store-filenames
969 $ touch foo
969 $ touch foo
970 $ hg -q commit -A -m initial
970 $ hg -q commit -A -m initial
971 $ touch .hg/store/.XX_special_filename
971 $ touch .hg/store/.XX_special_filename
972
972
973 $ hg debugupgraderepo --run
973 $ hg debugupgraderepo --run
974 upgrade will perform the following actions:
974 upgrade will perform the following actions:
975
975
976 requirements
976 requirements
977 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
977 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
978
978
979 sidedata
979 sidedata
980 Allows storage of extra data alongside a revision.
980 Allows storage of extra data alongside a revision.
981
981
982 copies-sdc
982 copies-sdc
983 Allows to use more efficient algorithm to deal with copy tracing.
983 Allows to use more efficient algorithm to deal with copy tracing.
984
984
985 beginning upgrade...
985 beginning upgrade...
986 repository locked and read-only
986 repository locked and read-only
987 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
987 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
988 (it is safe to interrupt this process any time before data migration completes)
988 (it is safe to interrupt this process any time before data migration completes)
989 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
989 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
990 migrating 301 bytes in store; 107 bytes tracked data
990 migrating 301 bytes in store; 107 bytes tracked data
991 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
991 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
992 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
992 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
993 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
993 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
994 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
994 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
995 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
995 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
996 finished migrating 1 changelog revisions; change in size: 0 bytes
996 finished migrating 1 changelog revisions; change in size: 0 bytes
997 finished migrating 3 total revisions; total change in store size: 0 bytes
997 finished migrating 3 total revisions; total change in store size: 0 bytes
998 copying .XX_special_filename
998 copying .XX_special_filename
999 copying phaseroots
999 copying phaseroots
1000 data fully migrated to temporary repository
1000 data fully migrated to temporary repository
1001 marking source repository as being upgraded; clients will be unable to read from repository
1001 marking source repository as being upgraded; clients will be unable to read from repository
1002 starting in-place swap of repository data
1002 starting in-place swap of repository data
1003 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1003 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1004 replacing store...
1004 replacing store...
1005 store replacement complete; repository was inconsistent for *s (glob)
1005 store replacement complete; repository was inconsistent for *s (glob)
1006 finalizing requirements file and making repository readable again
1006 finalizing requirements file and making repository readable again
1007 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1007 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1008 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1008 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1009 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1009 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1010 $ hg debugupgraderepo --run --optimize redeltafulladd
1010 $ hg debugupgraderepo --run --optimize redeltafulladd
1011 upgrade will perform the following actions:
1011 upgrade will perform the following actions:
1012
1012
1013 requirements
1013 requirements
1014 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1014 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1015
1015
1016 sidedata
1016 sidedata
1017 Allows storage of extra data alongside a revision.
1017 Allows storage of extra data alongside a revision.
1018
1018
1019 copies-sdc
1019 copies-sdc
1020 Allows to use more efficient algorithm to deal with copy tracing.
1020 Allows to use more efficient algorithm to deal with copy tracing.
1021
1021
1022 re-delta-fulladd
1022 re-delta-fulladd
1023 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
1023 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
1024
1024
1025 beginning upgrade...
1025 beginning upgrade...
1026 repository locked and read-only
1026 repository locked and read-only
1027 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1027 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1028 (it is safe to interrupt this process any time before data migration completes)
1028 (it is safe to interrupt this process any time before data migration completes)
1029 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1029 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1030 migrating 301 bytes in store; 107 bytes tracked data
1030 migrating 301 bytes in store; 107 bytes tracked data
1031 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
1031 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
1032 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1032 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1033 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
1033 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
1034 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1034 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1035 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
1035 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
1036 finished migrating 1 changelog revisions; change in size: 0 bytes
1036 finished migrating 1 changelog revisions; change in size: 0 bytes
1037 finished migrating 3 total revisions; total change in store size: 0 bytes
1037 finished migrating 3 total revisions; total change in store size: 0 bytes
1038 copying .XX_special_filename
1038 copying .XX_special_filename
1039 copying phaseroots
1039 copying phaseroots
1040 data fully migrated to temporary repository
1040 data fully migrated to temporary repository
1041 marking source repository as being upgraded; clients will be unable to read from repository
1041 marking source repository as being upgraded; clients will be unable to read from repository
1042 starting in-place swap of repository data
1042 starting in-place swap of repository data
1043 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1043 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1044 replacing store...
1044 replacing store...
1045 store replacement complete; repository was inconsistent for *s (glob)
1045 store replacement complete; repository was inconsistent for *s (glob)
1046 finalizing requirements file and making repository readable again
1046 finalizing requirements file and making repository readable again
1047 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1047 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1048 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1048 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1049 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1049 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1050
1050
1051 fncache is valid after upgrade
1051 fncache is valid after upgrade
1052
1052
1053 $ hg debugrebuildfncache
1053 $ hg debugrebuildfncache
1054 fncache already up to date
1054 fncache already up to date
1055
1055
1056 $ cd ..
1056 $ cd ..
1057
1057
1058 Check upgrading a large file repository
1058 Check upgrading a large file repository
1059 ---------------------------------------
1059 ---------------------------------------
1060
1060
1061 $ hg init largefilesrepo
1061 $ hg init largefilesrepo
1062 $ cat << EOF >> largefilesrepo/.hg/hgrc
1062 $ cat << EOF >> largefilesrepo/.hg/hgrc
1063 > [extensions]
1063 > [extensions]
1064 > largefiles =
1064 > largefiles =
1065 > EOF
1065 > EOF
1066
1066
1067 $ cd largefilesrepo
1067 $ cd largefilesrepo
1068 $ touch foo
1068 $ touch foo
1069 $ hg add --large foo
1069 $ hg add --large foo
1070 $ hg -q commit -m initial
1070 $ hg -q commit -m initial
1071 $ cat .hg/requires
1071 $ cat .hg/requires
1072 dotencode
1072 dotencode
1073 fncache
1073 fncache
1074 generaldelta
1074 generaldelta
1075 largefiles
1075 largefiles
1076 revlogv1
1076 revlogv1
1077 sparserevlog
1077 sparserevlog
1078 store
1078 store
1079
1079
1080 $ hg debugupgraderepo --run
1080 $ hg debugupgraderepo --run
1081 upgrade will perform the following actions:
1081 upgrade will perform the following actions:
1082
1082
1083 requirements
1083 requirements
1084 preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, sparserevlog, store
1084 preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, sparserevlog, store
1085
1085
1086 sidedata
1086 sidedata
1087 Allows storage of extra data alongside a revision.
1087 Allows storage of extra data alongside a revision.
1088
1088
1089 copies-sdc
1089 copies-sdc
1090 Allows to use more efficient algorithm to deal with copy tracing.
1090 Allows to use more efficient algorithm to deal with copy tracing.
1091
1091
1092 beginning upgrade...
1092 beginning upgrade...
1093 repository locked and read-only
1093 repository locked and read-only
1094 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1094 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1095 (it is safe to interrupt this process any time before data migration completes)
1095 (it is safe to interrupt this process any time before data migration completes)
1096 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1096 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1097 migrating 355 bytes in store; 160 bytes tracked data
1097 migrating 355 bytes in store; 160 bytes tracked data
1098 migrating 1 filelogs containing 1 revisions (106 bytes in store; 41 bytes tracked data)
1098 migrating 1 filelogs containing 1 revisions (106 bytes in store; 41 bytes tracked data)
1099 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1099 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1100 migrating 1 manifests containing 1 revisions (116 bytes in store; 51 bytes tracked data)
1100 migrating 1 manifests containing 1 revisions (116 bytes in store; 51 bytes tracked data)
1101 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1101 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1102 migrating changelog containing 1 revisions (133 bytes in store; 68 bytes tracked data)
1102 migrating changelog containing 1 revisions (133 bytes in store; 68 bytes tracked data)
1103 finished migrating 1 changelog revisions; change in size: 0 bytes
1103 finished migrating 1 changelog revisions; change in size: 0 bytes
1104 finished migrating 3 total revisions; total change in store size: 0 bytes
1104 finished migrating 3 total revisions; total change in store size: 0 bytes
1105 copying phaseroots
1105 copying phaseroots
1106 data fully migrated to temporary repository
1106 data fully migrated to temporary repository
1107 marking source repository as being upgraded; clients will be unable to read from repository
1107 marking source repository as being upgraded; clients will be unable to read from repository
1108 starting in-place swap of repository data
1108 starting in-place swap of repository data
1109 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1109 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1110 replacing store...
1110 replacing store...
1111 store replacement complete; repository was inconsistent for *s (glob)
1111 store replacement complete; repository was inconsistent for *s (glob)
1112 finalizing requirements file and making repository readable again
1112 finalizing requirements file and making repository readable again
1113 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1113 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1114 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1114 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1115 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1115 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1116 $ cat .hg/requires
1116 $ cat .hg/requires
1117 dotencode
1117 dotencode
1118 fncache
1118 fncache
1119 generaldelta
1119 generaldelta
1120 largefiles
1120 largefiles
1121 revlogv1
1121 revlogv1
1122 sparserevlog
1122 sparserevlog
1123 store
1123 store
1124
1124
1125 $ cat << EOF >> .hg/hgrc
1125 $ cat << EOF >> .hg/hgrc
1126 > [extensions]
1126 > [extensions]
1127 > lfs =
1127 > lfs =
1128 > [lfs]
1128 > [lfs]
1129 > threshold = 10
1129 > threshold = 10
1130 > EOF
1130 > EOF
1131 $ echo '123456789012345' > lfs.bin
1131 $ echo '123456789012345' > lfs.bin
1132 $ hg ci -Am 'lfs.bin'
1132 $ hg ci -Am 'lfs.bin'
1133 adding lfs.bin
1133 adding lfs.bin
1134 $ grep lfs .hg/requires
1134 $ grep lfs .hg/requires
1135 lfs
1135 lfs
1136 $ find .hg/store/lfs -type f
1136 $ find .hg/store/lfs -type f
1137 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1137 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1138
1138
1139 $ hg debugupgraderepo --run
1139 $ hg debugupgraderepo --run
1140 upgrade will perform the following actions:
1140 upgrade will perform the following actions:
1141
1141
1142 requirements
1142 requirements
1143 preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, sparserevlog, store
1143 preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, sparserevlog, store
1144
1144
1145 sidedata
1145 sidedata
1146 Allows storage of extra data alongside a revision.
1146 Allows storage of extra data alongside a revision.
1147
1147
1148 copies-sdc
1148 copies-sdc
1149 Allows to use more efficient algorithm to deal with copy tracing.
1149 Allows to use more efficient algorithm to deal with copy tracing.
1150
1150
1151 beginning upgrade...
1151 beginning upgrade...
1152 repository locked and read-only
1152 repository locked and read-only
1153 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1153 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1154 (it is safe to interrupt this process any time before data migration completes)
1154 (it is safe to interrupt this process any time before data migration completes)
1155 migrating 6 total revisions (2 in filelogs, 2 in manifests, 2 in changelog)
1155 migrating 6 total revisions (2 in filelogs, 2 in manifests, 2 in changelog)
1156 migrating 801 bytes in store; 467 bytes tracked data
1156 migrating 801 bytes in store; 467 bytes tracked data
1157 migrating 2 filelogs containing 2 revisions (296 bytes in store; 182 bytes tracked data)
1157 migrating 2 filelogs containing 2 revisions (296 bytes in store; 182 bytes tracked data)
1158 finished migrating 2 filelog revisions across 2 filelogs; change in size: 0 bytes
1158 finished migrating 2 filelog revisions across 2 filelogs; change in size: 0 bytes
1159 migrating 1 manifests containing 2 revisions (241 bytes in store; 151 bytes tracked data)
1159 migrating 1 manifests containing 2 revisions (241 bytes in store; 151 bytes tracked data)
1160 finished migrating 2 manifest revisions across 1 manifests; change in size: 0 bytes
1160 finished migrating 2 manifest revisions across 1 manifests; change in size: 0 bytes
1161 migrating changelog containing 2 revisions (264 bytes in store; 134 bytes tracked data)
1161 migrating changelog containing 2 revisions (264 bytes in store; 134 bytes tracked data)
1162 finished migrating 2 changelog revisions; change in size: 0 bytes
1162 finished migrating 2 changelog revisions; change in size: 0 bytes
1163 finished migrating 6 total revisions; total change in store size: 0 bytes
1163 finished migrating 6 total revisions; total change in store size: 0 bytes
1164 copying phaseroots
1164 copying phaseroots
1165 copying lfs blob d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1165 copying lfs blob d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1166 data fully migrated to temporary repository
1166 data fully migrated to temporary repository
1167 marking source repository as being upgraded; clients will be unable to read from repository
1167 marking source repository as being upgraded; clients will be unable to read from repository
1168 starting in-place swap of repository data
1168 starting in-place swap of repository data
1169 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1169 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1170 replacing store...
1170 replacing store...
1171 store replacement complete; repository was inconsistent for *s (glob)
1171 store replacement complete; repository was inconsistent for *s (glob)
1172 finalizing requirements file and making repository readable again
1172 finalizing requirements file and making repository readable again
1173 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1173 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1174 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1174 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1175 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1175 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1176
1176
1177 $ grep lfs .hg/requires
1177 $ grep lfs .hg/requires
1178 lfs
1178 lfs
1179 $ find .hg/store/lfs -type f
1179 $ find .hg/store/lfs -type f
1180 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1180 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1181 $ hg verify
1181 $ hg verify
1182 checking changesets
1182 checking changesets
1183 checking manifests
1183 checking manifests
1184 crosschecking files in changesets and manifests
1184 crosschecking files in changesets and manifests
1185 checking files
1185 checking files
1186 checked 2 changesets with 2 changes to 2 files
1186 checked 2 changesets with 2 changes to 2 files
1187 $ hg debugdata lfs.bin 0
1187 $ hg debugdata lfs.bin 0
1188 version https://git-lfs.github.com/spec/v1
1188 version https://git-lfs.github.com/spec/v1
1189 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1189 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1190 size 16
1190 size 16
1191 x-is-binary 0
1191 x-is-binary 0
1192
1192
1193 $ cd ..
1193 $ cd ..
1194
1194
1195 repository config is taken in account
1195 repository config is taken in account
1196 -------------------------------------
1196 -------------------------------------
1197
1197
1198 $ cat << EOF >> $HGRCPATH
1198 $ cat << EOF >> $HGRCPATH
1199 > [format]
1199 > [format]
1200 > maxchainlen = 1
1200 > maxchainlen = 1
1201 > EOF
1201 > EOF
1202
1202
1203 $ hg init localconfig
1203 $ hg init localconfig
1204 $ cd localconfig
1204 $ cd localconfig
1205 $ cat << EOF > file
1205 $ cat << EOF > file
1206 > some content
1206 > some content
1207 > with some length
1207 > with some length
1208 > to make sure we get a delta
1208 > to make sure we get a delta
1209 > after changes
1209 > after changes
1210 > very long
1210 > very long
1211 > very long
1211 > very long
1212 > very long
1212 > very long
1213 > very long
1213 > very long
1214 > very long
1214 > very long
1215 > very long
1215 > very long
1216 > very long
1216 > very long
1217 > very long
1217 > very long
1218 > very long
1218 > very long
1219 > very long
1219 > very long
1220 > very long
1220 > very long
1221 > EOF
1221 > EOF
1222 $ hg -q commit -A -m A
1222 $ hg -q commit -A -m A
1223 $ echo "new line" >> file
1223 $ echo "new line" >> file
1224 $ hg -q commit -m B
1224 $ hg -q commit -m B
1225 $ echo "new line" >> file
1225 $ echo "new line" >> file
1226 $ hg -q commit -m C
1226 $ hg -q commit -m C
1227
1227
1228 $ cat << EOF >> .hg/hgrc
1228 $ cat << EOF >> .hg/hgrc
1229 > [format]
1229 > [format]
1230 > maxchainlen = 9001
1230 > maxchainlen = 9001
1231 > EOF
1231 > EOF
1232 $ hg config format
1232 $ hg config format
1233 format.maxchainlen=9001
1233 format.maxchainlen=9001
1234 $ hg debugdeltachain file
1234 $ hg debugdeltachain file
1235 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1235 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1236 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1236 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1237 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1237 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1238 2 1 2 0 other 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
1238 2 1 2 0 other 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
1239
1239
1240 $ hg debugupgraderepo --run --optimize redeltaall
1240 $ hg debugupgraderepo --run --optimize redeltaall
1241 upgrade will perform the following actions:
1241 upgrade will perform the following actions:
1242
1242
1243 requirements
1243 requirements
1244 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1244 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1245
1245
1246 sidedata
1246 sidedata
1247 Allows storage of extra data alongside a revision.
1247 Allows storage of extra data alongside a revision.
1248
1248
1249 copies-sdc
1249 copies-sdc
1250 Allows to use more efficient algorithm to deal with copy tracing.
1250 Allows to use more efficient algorithm to deal with copy tracing.
1251
1251
1252 re-delta-all
1252 re-delta-all
1253 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
1253 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
1254
1254
1255 beginning upgrade...
1255 beginning upgrade...
1256 repository locked and read-only
1256 repository locked and read-only
1257 creating temporary repository to stage migrated data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
1257 creating temporary repository to stage migrated data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
1258 (it is safe to interrupt this process any time before data migration completes)
1258 (it is safe to interrupt this process any time before data migration completes)
1259 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1259 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1260 migrating 1019 bytes in store; 882 bytes tracked data
1260 migrating 1019 bytes in store; 882 bytes tracked data
1261 migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
1261 migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
1262 finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
1262 finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
1263 migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
1263 migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
1264 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1264 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1265 migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
1265 migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
1266 finished migrating 3 changelog revisions; change in size: 0 bytes
1266 finished migrating 3 changelog revisions; change in size: 0 bytes
1267 finished migrating 9 total revisions; total change in store size: -9 bytes
1267 finished migrating 9 total revisions; total change in store size: -9 bytes
1268 copying phaseroots
1268 copying phaseroots
1269 data fully migrated to temporary repository
1269 data fully migrated to temporary repository
1270 marking source repository as being upgraded; clients will be unable to read from repository
1270 marking source repository as being upgraded; clients will be unable to read from repository
1271 starting in-place swap of repository data
1271 starting in-place swap of repository data
1272 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1272 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1273 replacing store...
1273 replacing store...
1274 store replacement complete; repository was inconsistent for *s (glob)
1274 store replacement complete; repository was inconsistent for *s (glob)
1275 finalizing requirements file and making repository readable again
1275 finalizing requirements file and making repository readable again
1276 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
1276 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
1277 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1277 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1278 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1278 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1279 $ hg debugdeltachain file
1279 $ hg debugdeltachain file
1280 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1280 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1281 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1281 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1282 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1282 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1283 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
1283 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
1284 $ cd ..
1284 $ cd ..
1285
1285
1286 $ cat << EOF >> $HGRCPATH
1286 $ cat << EOF >> $HGRCPATH
1287 > [format]
1287 > [format]
1288 > maxchainlen = 9001
1288 > maxchainlen = 9001
1289 > EOF
1289 > EOF
1290
1290
1291 Check upgrading a sparse-revlog repository
1291 Check upgrading a sparse-revlog repository
1292 ---------------------------------------
1292 ---------------------------------------
1293
1293
1294 $ hg init sparserevlogrepo --config format.sparse-revlog=no
1294 $ hg init sparserevlogrepo --config format.sparse-revlog=no
1295 $ cd sparserevlogrepo
1295 $ cd sparserevlogrepo
1296 $ touch foo
1296 $ touch foo
1297 $ hg add foo
1297 $ hg add foo
1298 $ hg -q commit -m "foo"
1298 $ hg -q commit -m "foo"
1299 $ cat .hg/requires
1299 $ cat .hg/requires
1300 dotencode
1300 dotencode
1301 fncache
1301 fncache
1302 generaldelta
1302 generaldelta
1303 revlogv1
1303 revlogv1
1304 store
1304 store
1305
1305
1306 Check that we can add the sparse-revlog format requirement
1306 Check that we can add the sparse-revlog format requirement
1307 $ hg --config format.sparse-revlog=yes debugupgraderepo --run >/dev/null
1307 $ hg --config format.sparse-revlog=yes debugupgraderepo --run >/dev/null
1308 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1308 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1309 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1309 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1310 $ cat .hg/requires
1310 $ cat .hg/requires
1311 dotencode
1311 dotencode
1312 fncache
1312 fncache
1313 generaldelta
1313 generaldelta
1314 revlogv1
1314 revlogv1
1315 sparserevlog
1315 sparserevlog
1316 store
1316 store
1317
1317
1318 Check that we can remove the sparse-revlog format requirement
1318 Check that we can remove the sparse-revlog format requirement
1319 $ hg --config format.sparse-revlog=no debugupgraderepo --run >/dev/null
1319 $ hg --config format.sparse-revlog=no debugupgraderepo --run >/dev/null
1320 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1320 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1321 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1321 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1322 $ cat .hg/requires
1322 $ cat .hg/requires
1323 dotencode
1323 dotencode
1324 fncache
1324 fncache
1325 generaldelta
1325 generaldelta
1326 revlogv1
1326 revlogv1
1327 store
1327 store
1328
1328
1329 #if zstd
1329 #if zstd
1330
1330
1331 Check upgrading to a zstd revlog
1331 Check upgrading to a zstd revlog
1332 --------------------------------
1332 --------------------------------
1333
1333
1334 upgrade
1334 upgrade
1335
1335
1336 $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup >/dev/null
1336 $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup >/dev/null
1337 $ hg debugformat -v
1337 $ hg debugformat -v
1338 format-variant repo config default
1338 format-variant repo config default
1339 fncache: yes yes yes
1339 fncache: yes yes yes
1340 dotencode: yes yes yes
1340 dotencode: yes yes yes
1341 generaldelta: yes yes yes
1341 generaldelta: yes yes yes
1342 sparserevlog: yes yes yes
1342 sparserevlog: yes yes yes
1343 sidedata: no no no
1343 sidedata: no no no
1344 copies-sdc: no no no
1344 copies-sdc: no no no
1345 plain-cl-delta: yes yes yes
1345 plain-cl-delta: yes yes yes
1346 compression: zstd zlib zlib
1346 compression: zstd zlib zlib
1347 compression-level: default default default
1347 compression-level: default default default
1348 $ cat .hg/requires
1348 $ cat .hg/requires
1349 dotencode
1349 dotencode
1350 fncache
1350 fncache
1351 generaldelta
1351 generaldelta
1352 revlog-compression-zstd
1352 revlog-compression-zstd
1353 revlogv1
1353 revlogv1
1354 sparserevlog
1354 sparserevlog
1355 store
1355 store
1356
1356
1357 downgrade
1357 downgrade
1358
1358
1359 $ hg debugupgraderepo --run --no-backup > /dev/null
1359 $ hg debugupgraderepo --run --no-backup > /dev/null
1360 $ hg debugformat -v
1360 $ hg debugformat -v
1361 format-variant repo config default
1361 format-variant repo config default
1362 fncache: yes yes yes
1362 fncache: yes yes yes
1363 dotencode: yes yes yes
1363 dotencode: yes yes yes
1364 generaldelta: yes yes yes
1364 generaldelta: yes yes yes
1365 sparserevlog: yes yes yes
1365 sparserevlog: yes yes yes
1366 sidedata: no no no
1366 sidedata: no no no
1367 copies-sdc: no no no
1367 copies-sdc: no no no
1368 plain-cl-delta: yes yes yes
1368 plain-cl-delta: yes yes yes
1369 compression: zlib zlib zlib
1369 compression: zlib zlib zlib
1370 compression-level: default default default
1370 compression-level: default default default
1371 $ cat .hg/requires
1371 $ cat .hg/requires
1372 dotencode
1372 dotencode
1373 fncache
1373 fncache
1374 generaldelta
1374 generaldelta
1375 revlogv1
1375 revlogv1
1376 sparserevlog
1376 sparserevlog
1377 store
1377 store
1378
1378
1379 upgrade from hgrc
1379 upgrade from hgrc
1380
1380
1381 $ cat >> .hg/hgrc << EOF
1381 $ cat >> .hg/hgrc << EOF
1382 > [format]
1382 > [format]
1383 > revlog-compression=zstd
1383 > revlog-compression=zstd
1384 > EOF
1384 > EOF
1385 $ hg debugupgraderepo --run --no-backup > /dev/null
1385 $ hg debugupgraderepo --run --no-backup > /dev/null
1386 $ hg debugformat -v
1386 $ hg debugformat -v
1387 format-variant repo config default
1387 format-variant repo config default
1388 fncache: yes yes yes
1388 fncache: yes yes yes
1389 dotencode: yes yes yes
1389 dotencode: yes yes yes
1390 generaldelta: yes yes yes
1390 generaldelta: yes yes yes
1391 sparserevlog: yes yes yes
1391 sparserevlog: yes yes yes
1392 sidedata: no no no
1392 sidedata: no no no
1393 copies-sdc: no no no
1393 copies-sdc: no no no
1394 plain-cl-delta: yes yes yes
1394 plain-cl-delta: yes yes yes
1395 compression: zstd zstd zlib
1395 compression: zstd zstd zlib
1396 compression-level: default default default
1396 compression-level: default default default
1397 $ cat .hg/requires
1397 $ cat .hg/requires
1398 dotencode
1398 dotencode
1399 fncache
1399 fncache
1400 generaldelta
1400 generaldelta
1401 revlog-compression-zstd
1401 revlog-compression-zstd
1402 revlogv1
1402 revlogv1
1403 sparserevlog
1403 sparserevlog
1404 store
1404 store
1405
1405
1406 #endif
1406 #endif
1407
1407
1408 Check upgrading to a side-data revlog
1408 Check upgrading to a side-data revlog
1409 -------------------------------------
1409 -------------------------------------
1410
1410
1411 upgrade
1411 upgrade
1412
1412
1413 $ hg --config format.use-side-data=yes debugupgraderepo --run --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" >/dev/null
1413 $ hg --config format.exp-use-side-data=yes debugupgraderepo --run --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" >/dev/null
1414 $ hg debugformat -v
1414 $ hg debugformat -v
1415 format-variant repo config default
1415 format-variant repo config default
1416 fncache: yes yes yes
1416 fncache: yes yes yes
1417 dotencode: yes yes yes
1417 dotencode: yes yes yes
1418 generaldelta: yes yes yes
1418 generaldelta: yes yes yes
1419 sparserevlog: yes yes yes
1419 sparserevlog: yes yes yes
1420 sidedata: yes no no
1420 sidedata: yes no no
1421 copies-sdc: no no no
1421 copies-sdc: no no no
1422 plain-cl-delta: yes yes yes
1422 plain-cl-delta: yes yes yes
1423 compression: zstd zstd zlib (zstd !)
1423 compression: zstd zstd zlib (zstd !)
1424 compression: zlib zlib zlib (no-zstd !)
1424 compression: zlib zlib zlib (no-zstd !)
1425 compression-level: default default default
1425 compression-level: default default default
1426 $ cat .hg/requires
1426 $ cat .hg/requires
1427 dotencode
1427 dotencode
1428 exp-sidedata-flag
1428 exp-sidedata-flag
1429 fncache
1429 fncache
1430 generaldelta
1430 generaldelta
1431 revlog-compression-zstd (zstd !)
1431 revlog-compression-zstd (zstd !)
1432 revlogv1
1432 revlogv1
1433 sparserevlog
1433 sparserevlog
1434 store
1434 store
1435 $ hg debugsidedata -c 0
1435 $ hg debugsidedata -c 0
1436 2 sidedata entries
1436 2 sidedata entries
1437 entry-0001 size 4
1437 entry-0001 size 4
1438 entry-0002 size 32
1438 entry-0002 size 32
1439
1439
1440 downgrade
1440 downgrade
1441
1441
1442 $ hg debugupgraderepo --config format.use-side-data=no --run --no-backup > /dev/null
1442 $ hg debugupgraderepo --config format.exp-use-side-data=no --run --no-backup > /dev/null
1443 $ hg debugformat -v
1443 $ hg debugformat -v
1444 format-variant repo config default
1444 format-variant repo config default
1445 fncache: yes yes yes
1445 fncache: yes yes yes
1446 dotencode: yes yes yes
1446 dotencode: yes yes yes
1447 generaldelta: yes yes yes
1447 generaldelta: yes yes yes
1448 sparserevlog: yes yes yes
1448 sparserevlog: yes yes yes
1449 sidedata: no no no
1449 sidedata: no no no
1450 copies-sdc: no no no
1450 copies-sdc: no no no
1451 plain-cl-delta: yes yes yes
1451 plain-cl-delta: yes yes yes
1452 compression: zstd zstd zlib (zstd !)
1452 compression: zstd zstd zlib (zstd !)
1453 compression: zlib zlib zlib (no-zstd !)
1453 compression: zlib zlib zlib (no-zstd !)
1454 compression-level: default default default
1454 compression-level: default default default
1455 $ cat .hg/requires
1455 $ cat .hg/requires
1456 dotencode
1456 dotencode
1457 fncache
1457 fncache
1458 generaldelta
1458 generaldelta
1459 revlog-compression-zstd (zstd !)
1459 revlog-compression-zstd (zstd !)
1460 revlogv1
1460 revlogv1
1461 sparserevlog
1461 sparserevlog
1462 store
1462 store
1463 $ hg debugsidedata -c 0
1463 $ hg debugsidedata -c 0
1464
1464
1465 upgrade from hgrc
1465 upgrade from hgrc
1466
1466
1467 $ cat >> .hg/hgrc << EOF
1467 $ cat >> .hg/hgrc << EOF
1468 > [format]
1468 > [format]
1469 > use-side-data=yes
1469 > exp-use-side-data=yes
1470 > EOF
1470 > EOF
1471 $ hg debugupgraderepo --run --no-backup > /dev/null
1471 $ hg debugupgraderepo --run --no-backup > /dev/null
1472 $ hg debugformat -v
1472 $ hg debugformat -v
1473 format-variant repo config default
1473 format-variant repo config default
1474 fncache: yes yes yes
1474 fncache: yes yes yes
1475 dotencode: yes yes yes
1475 dotencode: yes yes yes
1476 generaldelta: yes yes yes
1476 generaldelta: yes yes yes
1477 sparserevlog: yes yes yes
1477 sparserevlog: yes yes yes
1478 sidedata: yes yes no
1478 sidedata: yes yes no
1479 copies-sdc: no no no
1479 copies-sdc: no no no
1480 plain-cl-delta: yes yes yes
1480 plain-cl-delta: yes yes yes
1481 compression: zstd zstd zlib (zstd !)
1481 compression: zstd zstd zlib (zstd !)
1482 compression: zlib zlib zlib (no-zstd !)
1482 compression: zlib zlib zlib (no-zstd !)
1483 compression-level: default default default
1483 compression-level: default default default
1484 $ cat .hg/requires
1484 $ cat .hg/requires
1485 dotencode
1485 dotencode
1486 exp-sidedata-flag
1486 exp-sidedata-flag
1487 fncache
1487 fncache
1488 generaldelta
1488 generaldelta
1489 revlog-compression-zstd (zstd !)
1489 revlog-compression-zstd (zstd !)
1490 revlogv1
1490 revlogv1
1491 sparserevlog
1491 sparserevlog
1492 store
1492 store
1493 $ hg debugsidedata -c 0
1493 $ hg debugsidedata -c 0
General Comments 0
You need to be logged in to leave comments. Login now