localrepo: define storage backend in creation options (API)
Gregory Szorc
r40032:dbcb466d default
@@ -1,1404 +1,1407
# configitems.py - centralized declaration of configuration option
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools
import re

from . import (
    encoding,
    error,
)

def loadconfigtable(ui, extname, configtable):
    """update the config items known to the ui with the extension's ones"""
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            msg = "extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config='warn-config')

        knownitems.update(items)

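# A minimal sketch (hypothetical extension name and option) of the caller
# side: extensions declare items through registrar.configitem into their own
# configtable, and the extension loader then feeds that table to
# loadconfigtable() above.
#
#   from mercurial import registrar
#   configtable = {}
#   configitem = registrar.configitem(configtable)
#   configitem('myext', 'some-flag', default=False)
#   # at extension load time: loadconfigtable(ui, 'myext', configtable)
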
class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, matched by name using a regular
              expression.
    """

    def __init__(self, section, name, default=None, alias=(),
                 generic=False, priority=0):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self._re = None
        if generic:
            self._re = re.compile(self.name)

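# Illustration, using a registration that actually appears below: a generic
# item such as configitem('color', '.*', default=None, generic=True)
# compiles its name into a regular expression, so a lookup for a name like
# 'diff.inserted' in the 'color' section can resolve to it even though no
# exact item of that name was registered.
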
class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        baseitem = super(itemregister, self).get(key)
        if baseitem is not None and not baseitem.generic:
            return baseitem

        # search for a matching generic item
        generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
        for item in generics:
            # we use 'match' instead of 'search' to make the matching simpler
            # for people unfamiliar with regular expressions. Having the match
            # rooted at the start of the string produces less surprising
            # results for users writing simple regexes for sub-attributes.
            #
            # For example, using "color\..*" with match produces an
            # unsurprising result, while using search could suddenly match
            # apparently unrelated configuration that happens to contain
            # "color." anywhere. This is a tradeoff where we favor requiring
            # ".*" on some matches to avoid the need to prefix most patterns
            # with "^". The "^" seems more error prone.
            if item._re.match(key):
                return item

        return None

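# Resolution-order sketch (hypothetical register contents): an exact,
# non-generic entry always wins; otherwise the generics are tried sorted by
# (priority, name), so a lower priority value is consulted first.
#
#   reg = itemregister()
#   reg['mode'] = configitem('color', 'mode', default='auto')
#   reg['.*'] = configitem('color', '.*', default=None, generic=True)
#   reg.get('mode').default       # -> 'auto' (exact, non-generic match)
#   reg.get('pagermode').default  # -> None (falls through to the generic)
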
coreitems = {}

def _register(configtable, *args, **kwargs):
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = "duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item

# special value for the case where the default is derived from other values
dynamicdefault = object()

# Registering actual config items

def getitemregister(configtable):
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f

coreconfigitem = getitemregister(coreitems)

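# Sketch of how a consumer can recognize a derived default (assumed usage;
# the ui layer performs a check along these lines when resolving a value):
#
#   item = coreitems['color'].get('pagermode')
#   if item is not None and item.default is dynamicdefault:
#       pass  # compute the effective value from other settings
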
coreconfigitem('alias', '.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('annotate', 'nodates',
    default=False,
)
coreconfigitem('annotate', 'showfunc',
    default=False,
)
coreconfigitem('annotate', 'unified',
    default=None,
)
coreconfigitem('annotate', 'git',
    default=False,
)
coreconfigitem('annotate', 'ignorews',
    default=False,
)
coreconfigitem('annotate', 'ignorewsamount',
    default=False,
)
coreconfigitem('annotate', 'ignoreblanklines',
    default=False,
)
coreconfigitem('annotate', 'ignorewseol',
    default=False,
)
coreconfigitem('annotate', 'nobinary',
    default=False,
)
coreconfigitem('annotate', 'noprefix',
    default=False,
)
coreconfigitem('annotate', 'word-diff',
    default=False,
)
coreconfigitem('auth', 'cookiefile',
    default=None,
)
# bookmarks.pushing: internal hack for discovery
coreconfigitem('bookmarks', 'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem('bundle', 'mainreporoot',
    default='',
)
coreconfigitem('censor', 'policy',
    default='abort',
)
coreconfigitem('chgserver', 'idletimeout',
    default=3600,
)
coreconfigitem('chgserver', 'skiphash',
    default=False,
)
coreconfigitem('cmdserver', 'log',
    default=None,
)
coreconfigitem('color', '.*',
    default=None,
    generic=True,
)
coreconfigitem('color', 'mode',
    default='auto',
)
coreconfigitem('color', 'pagermode',
    default=dynamicdefault,
)
coreconfigitem('commands', 'grep.all-files',
    default=False,
)
coreconfigitem('commands', 'resolve.confirm',
    default=False,
)
coreconfigitem('commands', 'resolve.explicit-re-merge',
    default=False,
)
coreconfigitem('commands', 'resolve.mark-check',
    default='none',
)
coreconfigitem('commands', 'show.aliasprefix',
    default=list,
)
coreconfigitem('commands', 'status.relative',
    default=False,
)
coreconfigitem('commands', 'status.skipstates',
    default=[],
)
coreconfigitem('commands', 'status.terse',
    default='',
)
coreconfigitem('commands', 'status.verbose',
    default=False,
)
coreconfigitem('commands', 'update.check',
    default=None,
)
coreconfigitem('commands', 'update.requiredest',
    default=False,
)
coreconfigitem('committemplate', '.*',
    default=None,
    generic=True,
)
coreconfigitem('convert', 'bzr.saverev',
    default=True,
)
coreconfigitem('convert', 'cvsps.cache',
    default=True,
)
coreconfigitem('convert', 'cvsps.fuzz',
    default=60,
)
coreconfigitem('convert', 'cvsps.logencoding',
    default=None,
)
coreconfigitem('convert', 'cvsps.mergefrom',
    default=None,
)
coreconfigitem('convert', 'cvsps.mergeto',
    default=None,
)
coreconfigitem('convert', 'git.committeractions',
    default=lambda: ['messagedifferent'],
)
coreconfigitem('convert', 'git.extrakeys',
    default=list,
)
coreconfigitem('convert', 'git.findcopiesharder',
    default=False,
)
coreconfigitem('convert', 'git.remoteprefix',
    default='remote',
)
coreconfigitem('convert', 'git.renamelimit',
    default=400,
)
coreconfigitem('convert', 'git.saverev',
    default=True,
)
coreconfigitem('convert', 'git.similarity',
    default=50,
)
coreconfigitem('convert', 'git.skipsubmodules',
    default=False,
)
coreconfigitem('convert', 'hg.clonebranches',
    default=False,
)
coreconfigitem('convert', 'hg.ignoreerrors',
    default=False,
)
coreconfigitem('convert', 'hg.revs',
    default=None,
)
coreconfigitem('convert', 'hg.saverev',
    default=False,
)
coreconfigitem('convert', 'hg.sourcename',
    default=None,
)
coreconfigitem('convert', 'hg.startrev',
    default=None,
)
coreconfigitem('convert', 'hg.tagsbranch',
    default='default',
)
coreconfigitem('convert', 'hg.usebranchnames',
    default=True,
)
coreconfigitem('convert', 'ignoreancestorcheck',
    default=False,
)
coreconfigitem('convert', 'localtimezone',
    default=False,
)
coreconfigitem('convert', 'p4.encoding',
    default=dynamicdefault,
)
coreconfigitem('convert', 'p4.startrev',
    default=0,
)
coreconfigitem('convert', 'skiptags',
    default=False,
)
coreconfigitem('convert', 'svn.debugsvnlog',
    default=True,
)
coreconfigitem('convert', 'svn.trunk',
    default=None,
)
coreconfigitem('convert', 'svn.tags',
    default=None,
)
coreconfigitem('convert', 'svn.branches',
    default=None,
)
coreconfigitem('convert', 'svn.startrev',
    default=0,
)
coreconfigitem('debug', 'dirstate.delaywrite',
    default=0,
)
coreconfigitem('defaults', '.*',
    default=None,
    generic=True,
)
coreconfigitem('devel', 'all-warnings',
    default=False,
)
coreconfigitem('devel', 'bundle2.debug',
    default=False,
)
coreconfigitem('devel', 'cache-vfs',
    default=None,
)
coreconfigitem('devel', 'check-locks',
    default=False,
)
coreconfigitem('devel', 'check-relroot',
    default=False,
)
coreconfigitem('devel', 'default-date',
    default=None,
)
coreconfigitem('devel', 'deprec-warn',
    default=False,
)
coreconfigitem('devel', 'disableloaddefaultcerts',
    default=False,
)
coreconfigitem('devel', 'warn-empty-changegroup',
    default=False,
)
coreconfigitem('devel', 'legacy.exchange',
    default=list,
)
coreconfigitem('devel', 'servercafile',
    default='',
)
coreconfigitem('devel', 'serverexactprotocol',
    default='',
)
coreconfigitem('devel', 'serverrequirecert',
    default=False,
)
coreconfigitem('devel', 'strip-obsmarkers',
    default=True,
)
coreconfigitem('devel', 'warn-config',
    default=None,
)
coreconfigitem('devel', 'warn-config-default',
    default=None,
)
coreconfigitem('devel', 'user.obsmarker',
    default=None,
)
coreconfigitem('devel', 'warn-config-unknown',
    default=None,
)
coreconfigitem('devel', 'debug.extensions',
    default=False,
)
coreconfigitem('devel', 'debug.peer-request',
    default=False,
)
coreconfigitem('diff', 'nodates',
    default=False,
)
coreconfigitem('diff', 'showfunc',
    default=False,
)
coreconfigitem('diff', 'unified',
    default=None,
)
coreconfigitem('diff', 'git',
    default=False,
)
coreconfigitem('diff', 'ignorews',
    default=False,
)
coreconfigitem('diff', 'ignorewsamount',
    default=False,
)
coreconfigitem('diff', 'ignoreblanklines',
    default=False,
)
coreconfigitem('diff', 'ignorewseol',
    default=False,
)
coreconfigitem('diff', 'nobinary',
    default=False,
)
coreconfigitem('diff', 'noprefix',
    default=False,
)
coreconfigitem('diff', 'word-diff',
    default=False,
)
coreconfigitem('email', 'bcc',
    default=None,
)
coreconfigitem('email', 'cc',
    default=None,
)
coreconfigitem('email', 'charsets',
    default=list,
)
coreconfigitem('email', 'from',
    default=None,
)
coreconfigitem('email', 'method',
    default='smtp',
)
coreconfigitem('email', 'reply-to',
    default=None,
)
coreconfigitem('email', 'to',
    default=None,
)
coreconfigitem('experimental', 'archivemetatemplate',
    default=dynamicdefault,
)
coreconfigitem('experimental', 'bundle-phases',
    default=False,
)
coreconfigitem('experimental', 'bundle2-advertise',
    default=True,
)
coreconfigitem('experimental', 'bundle2-output-capture',
    default=False,
)
coreconfigitem('experimental', 'bundle2.pushback',
    default=False,
)
coreconfigitem('experimental', 'bundle2lazylocking',
    default=False,
)
coreconfigitem('experimental', 'bundlecomplevel',
    default=None,
)
coreconfigitem('experimental', 'bundlecomplevel.bzip2',
    default=None,
)
coreconfigitem('experimental', 'bundlecomplevel.gzip',
    default=None,
)
coreconfigitem('experimental', 'bundlecomplevel.none',
    default=None,
)
coreconfigitem('experimental', 'bundlecomplevel.zstd',
    default=None,
)
coreconfigitem('experimental', 'changegroup3',
    default=False,
)
coreconfigitem('experimental', 'clientcompressionengines',
    default=list,
)
coreconfigitem('experimental', 'copytrace',
    default='on',
)
coreconfigitem('experimental', 'copytrace.movecandidateslimit',
    default=100,
)
coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
    default=100,
)
coreconfigitem('experimental', 'crecordtest',
    default=None,
)
coreconfigitem('experimental', 'directaccess',
    default=False,
)
coreconfigitem('experimental', 'directaccess.revnums',
    default=False,
)
coreconfigitem('experimental', 'editortmpinhg',
    default=False,
)
coreconfigitem('experimental', 'evolution',
    default=list,
)
coreconfigitem('experimental', 'evolution.allowdivergence',
    default=False,
    alias=[('experimental', 'allowdivergence')]
)
coreconfigitem('experimental', 'evolution.allowunstable',
    default=None,
)
coreconfigitem('experimental', 'evolution.createmarkers',
    default=None,
)
coreconfigitem('experimental', 'evolution.effect-flags',
    default=True,
    alias=[('experimental', 'effect-flags')]
)
coreconfigitem('experimental', 'evolution.exchange',
    default=None,
)
coreconfigitem('experimental', 'evolution.bundle-obsmarker',
    default=False,
)
coreconfigitem('experimental', 'evolution.report-instabilities',
    default=True,
)
coreconfigitem('experimental', 'evolution.track-operation',
    default=True,
)
coreconfigitem('experimental', 'maxdeltachainspan',
    default=-1,
)
coreconfigitem('experimental', 'mergetempdirprefix',
    default=None,
)
coreconfigitem('experimental', 'mmapindexthreshold',
    default=None,
)
coreconfigitem('experimental', 'nonnormalparanoidcheck',
    default=False,
)
coreconfigitem('experimental', 'exportableenviron',
    default=list,
)
coreconfigitem('experimental', 'extendedheader.index',
    default=None,
)
coreconfigitem('experimental', 'extendedheader.similarity',
    default=False,
)
coreconfigitem('experimental', 'format.compression',
    default='zlib',
)
coreconfigitem('experimental', 'graphshorten',
    default=False,
)
coreconfigitem('experimental', 'graphstyle.parent',
    default=dynamicdefault,
)
coreconfigitem('experimental', 'graphstyle.missing',
    default=dynamicdefault,
)
coreconfigitem('experimental', 'graphstyle.grandparent',
    default=dynamicdefault,
)
coreconfigitem('experimental', 'hook-track-tags',
    default=False,
)
coreconfigitem('experimental', 'httppeer.advertise-v2',
    default=False,
)
coreconfigitem('experimental', 'httppostargs',
    default=False,
)
coreconfigitem('experimental', 'mergedriver',
    default=None,
)
coreconfigitem('experimental', 'nointerrupt', default=False)
coreconfigitem('experimental', 'nointerrupt-interactiveonly', default=True)

coreconfigitem('experimental', 'obsmarkers-exchange-debug',
    default=False,
)
coreconfigitem('experimental', 'remotenames',
    default=False,
)
coreconfigitem('experimental', 'removeemptydirs',
    default=True,
)
coreconfigitem('experimental', 'revisions.prefixhexnode',
    default=False,
)
coreconfigitem('experimental', 'revlogv2',
    default=None,
)
coreconfigitem('experimental', 'revisions.disambiguatewithin',
    default=None,
)
coreconfigitem('experimental', 'single-head-per-branch',
    default=False,
)
coreconfigitem('experimental', 'sshserver.support-v2',
    default=False,
)
coreconfigitem('experimental', 'spacemovesdown',
    default=False,
)
coreconfigitem('experimental', 'sparse-read',
    default=False,
)
coreconfigitem('experimental', 'sparse-read.density-threshold',
    default=0.50,
)
coreconfigitem('experimental', 'sparse-read.min-gap-size',
    default='65K',
)
coreconfigitem('experimental', 'treemanifest',
    default=False,
)
coreconfigitem('experimental', 'update.atomic-file',
    default=False,
)
coreconfigitem('experimental', 'sshpeer.advertise-v2',
    default=False,
)
coreconfigitem('experimental', 'web.apiserver',
    default=False,
)
coreconfigitem('experimental', 'web.api.http-v2',
    default=False,
)
coreconfigitem('experimental', 'web.api.debugreflect',
    default=False,
)
coreconfigitem('experimental', 'worker.wdir-get-thread-safe',
    default=False,
)
coreconfigitem('experimental', 'xdiff',
    default=False,
)
coreconfigitem('extensions', '.*',
    default=None,
    generic=True,
)
coreconfigitem('extdata', '.*',
    default=None,
    generic=True,
)
coreconfigitem('format', 'chunkcachesize',
    default=None,
)
coreconfigitem('format', 'dotencode',
    default=True,
)
coreconfigitem('format', 'generaldelta',
    default=False,
)
coreconfigitem('format', 'manifestcachesize',
    default=None,
)
coreconfigitem('format', 'maxchainlen',
    default=dynamicdefault,
)
coreconfigitem('format', 'obsstore-version',
    default=None,
)
coreconfigitem('format', 'sparse-revlog',
    default=False,
)
coreconfigitem('format', 'usefncache',
    default=True,
)
coreconfigitem('format', 'usegeneraldelta',
    default=True,
)
coreconfigitem('format', 'usestore',
    default=True,
)
coreconfigitem('format', 'internal-phase',
    default=False,
)
coreconfigitem('fsmonitor', 'warn_when_unused',
    default=True,
)
coreconfigitem('fsmonitor', 'warn_update_file_count',
    default=50000,
)
coreconfigitem('hooks', '.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('hgweb-paths', '.*',
    default=list,
    generic=True,
)
coreconfigitem('hostfingerprints', '.*',
    default=list,
    generic=True,
)
coreconfigitem('hostsecurity', 'ciphers',
    default=None,
)
coreconfigitem('hostsecurity', 'disabletls10warning',
    default=False,
)
coreconfigitem('hostsecurity', 'minimumprotocol',
    default=dynamicdefault,
)
coreconfigitem('hostsecurity', '.*:minimumprotocol$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('hostsecurity', '.*:ciphers$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('hostsecurity', '.*:fingerprints$',
    default=list,
    generic=True,
)
coreconfigitem('hostsecurity', '.*:verifycertsfile$',
    default=None,
    generic=True,
)

coreconfigitem('http_proxy', 'always',
    default=False,
)
coreconfigitem('http_proxy', 'host',
    default=None,
)
coreconfigitem('http_proxy', 'no',
    default=list,
)
coreconfigitem('http_proxy', 'passwd',
    default=None,
)
coreconfigitem('http_proxy', 'user',
    default=None,
)
coreconfigitem('logtoprocess', 'commandexception',
    default=None,
)
coreconfigitem('logtoprocess', 'commandfinish',
    default=None,
)
coreconfigitem('logtoprocess', 'command',
    default=None,
)
coreconfigitem('logtoprocess', 'develwarn',
    default=None,
)
coreconfigitem('logtoprocess', 'uiblocked',
    default=None,
)
coreconfigitem('merge', 'checkunknown',
    default='abort',
)
coreconfigitem('merge', 'checkignored',
    default='abort',
)
coreconfigitem('experimental', 'merge.checkpathconflicts',
    default=False,
)
coreconfigitem('merge', 'followcopies',
    default=True,
)
coreconfigitem('merge', 'on-failure',
    default='continue',
)
coreconfigitem('merge', 'preferancestor',
    default=lambda: ['*'],
)
coreconfigitem('merge', 'strict-capability-check',
    default=False,
)
coreconfigitem('merge-tools', '.*',
    default=None,
    generic=True,
)
coreconfigitem('merge-tools', br'.*\.args$',
    default="$local $base $other",
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.binary$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.check$',
    default=list,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.checkchanged$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.executable$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.fixeol$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.gui$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.mergemarkers$',
    default='basic',
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
    default=dynamicdefault,  # take from ui.mergemarkertemplate
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.priority$',
    default=0,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.premerge$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.symlink$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem('pager', 'attend-.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('pager', 'ignore',
    default=list,
)
coreconfigitem('pager', 'pager',
    default=dynamicdefault,
)
coreconfigitem('patch', 'eol',
    default='strict',
)
coreconfigitem('patch', 'fuzz',
    default=2,
)
coreconfigitem('paths', 'default',
    default=None,
)
coreconfigitem('paths', 'default-push',
    default=None,
)
coreconfigitem('paths', '.*',
    default=None,
    generic=True,
)
coreconfigitem('phases', 'checksubrepos',
    default='follow',
)
coreconfigitem('phases', 'new-commit',
    default='draft',
)
coreconfigitem('phases', 'publish',
    default=True,
)
coreconfigitem('profiling', 'enabled',
    default=False,
)
coreconfigitem('profiling', 'format',
    default='text',
)
coreconfigitem('profiling', 'freq',
    default=1000,
)
coreconfigitem('profiling', 'limit',
    default=30,
)
coreconfigitem('profiling', 'nested',
    default=0,
)
coreconfigitem('profiling', 'output',
    default=None,
)
coreconfigitem('profiling', 'showmax',
    default=0.999,
)
coreconfigitem('profiling', 'showmin',
    default=dynamicdefault,
)
coreconfigitem('profiling', 'sort',
    default='inlinetime',
)
coreconfigitem('profiling', 'statformat',
    default='hotpath',
)
coreconfigitem('profiling', 'time-track',
    default='cpu',
)
coreconfigitem('profiling', 'type',
    default='stat',
)
coreconfigitem('progress', 'assume-tty',
    default=False,
)
coreconfigitem('progress', 'changedelay',
    default=1,
)
coreconfigitem('progress', 'clear-complete',
    default=True,
)
coreconfigitem('progress', 'debug',
    default=False,
)
coreconfigitem('progress', 'delay',
    default=3,
)
coreconfigitem('progress', 'disable',
    default=False,
)
coreconfigitem('progress', 'estimateinterval',
    default=60.0,
)
coreconfigitem('progress', 'format',
    default=lambda: ['topic', 'bar', 'number', 'estimate'],
)
coreconfigitem('progress', 'refresh',
    default=0.1,
)
coreconfigitem('progress', 'width',
    default=dynamicdefault,
)
coreconfigitem('push', 'pushvars.server',
    default=False,
)
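# storage.new-repo-backend: names the storage backend used when creating new
# repositories (the addition made by this commit); 'revlogv1' is the
# traditional revlog-based store.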
coreconfigitem('storage', 'new-repo-backend',
    default='revlogv1',
)
coreconfigitem('storage', 'revlog.optimize-delta-parent-choice',
    default=True,
    alias=[('format', 'aggressivemergedeltas')],
)
coreconfigitem('server', 'bookmarks-pushkey-compat',
    default=True,
)
coreconfigitem('server', 'bundle1',
    default=True,
)
coreconfigitem('server', 'bundle1gd',
    default=None,
)
coreconfigitem('server', 'bundle1.pull',
    default=None,
)
coreconfigitem('server', 'bundle1gd.pull',
    default=None,
)
coreconfigitem('server', 'bundle1.push',
    default=None,
)
coreconfigitem('server', 'bundle1gd.push',
    default=None,
)
coreconfigitem('server', 'bundle2.stream',
    default=True,
    alias=[('experimental', 'bundle2.stream')]
)
coreconfigitem('server', 'compressionengines',
    default=list,
)
coreconfigitem('server', 'concurrent-push-mode',
    default='strict',
)
coreconfigitem('server', 'disablefullbundle',
    default=False,
)
coreconfigitem('server', 'maxhttpheaderlen',
    default=1024,
)
coreconfigitem('server', 'pullbundle',
    default=False,
)
coreconfigitem('server', 'preferuncompressed',
    default=False,
)
coreconfigitem('server', 'streamunbundle',
    default=False,
)
coreconfigitem('server', 'uncompressed',
    default=True,
)
coreconfigitem('server', 'uncompressedallowsecret',
    default=False,
)
coreconfigitem('server', 'validate',
    default=False,
)
coreconfigitem('server', 'zliblevel',
    default=-1,
)
coreconfigitem('server', 'zstdlevel',
    default=3,
)
coreconfigitem('share', 'pool',
    default=None,
)
coreconfigitem('share', 'poolnaming',
    default='identity',
)
coreconfigitem('smtp', 'host',
    default=None,
)
coreconfigitem('smtp', 'local_hostname',
    default=None,
)
coreconfigitem('smtp', 'password',
    default=None,
)
coreconfigitem('smtp', 'port',
    default=dynamicdefault,
)
coreconfigitem('smtp', 'tls',
    default='none',
)
coreconfigitem('smtp', 'username',
    default=None,
)
coreconfigitem('sparse', 'missingwarning',
    default=True,
)
coreconfigitem('subrepos', 'allowed',
    default=dynamicdefault,  # to make backporting simpler
)
coreconfigitem('subrepos', 'hg:allowed',
    default=dynamicdefault,
)
coreconfigitem('subrepos', 'git:allowed',
    default=dynamicdefault,
)
coreconfigitem('subrepos', 'svn:allowed',
    default=dynamicdefault,
)
coreconfigitem('templates', '.*',
    default=None,
    generic=True,
)
coreconfigitem('trusted', 'groups',
    default=list,
)
coreconfigitem('trusted', 'users',
    default=list,
)
coreconfigitem('ui', '_usedassubrepo',
    default=False,
)
coreconfigitem('ui', 'allowemptycommit',
    default=False,
)
coreconfigitem('ui', 'archivemeta',
    default=True,
)
coreconfigitem('ui', 'askusername',
    default=False,
)
coreconfigitem('ui', 'clonebundlefallback',
    default=False,
)
coreconfigitem('ui', 'clonebundleprefers',
    default=list,
)
coreconfigitem('ui', 'clonebundles',
    default=True,
)
coreconfigitem('ui', 'color',
    default='auto',
)
coreconfigitem('ui', 'commitsubrepos',
    default=False,
)
coreconfigitem('ui', 'debug',
    default=False,
)
coreconfigitem('ui', 'debugger',
    default=None,
)
coreconfigitem('ui', 'editor',
    default=dynamicdefault,
)
coreconfigitem('ui', 'fallbackencoding',
    default=None,
)
coreconfigitem('ui', 'forcecwd',
    default=None,
)
coreconfigitem('ui', 'forcemerge',
    default=None,
)
coreconfigitem('ui', 'formatdebug',
    default=False,
)
coreconfigitem('ui', 'formatjson',
    default=False,
)
coreconfigitem('ui', 'formatted',
    default=None,
)
coreconfigitem('ui', 'graphnodetemplate',
    default=None,
)
coreconfigitem('ui', 'history-editing-backup',
    default=True,
)
coreconfigitem('ui', 'interactive',
    default=None,
)
coreconfigitem('ui', 'interface',
    default=None,
)
coreconfigitem('ui', 'interface.chunkselector',
    default=None,
)
coreconfigitem('ui', 'large-file-limit',
    default=10000000,
)
coreconfigitem('ui', 'logblockedtimes',
    default=False,
)
coreconfigitem('ui', 'logtemplate',
    default=None,
)
coreconfigitem('ui', 'merge',
    default=None,
)
coreconfigitem('ui', 'mergemarkers',
    default='basic',
)
coreconfigitem('ui', 'mergemarkertemplate',
    default=('{node|short} '
             '{ifeq(tags, "tip", "", '
             'ifeq(tags, "", "", "{tags} "))}'
             '{if(bookmarks, "{bookmarks} ")}'
             '{ifeq(branch, "default", "", "{branch} ")}'
             '- {author|user}: {desc|firstline}')
)
coreconfigitem('ui', 'nontty',
    default=False,
)
coreconfigitem('ui', 'origbackuppath',
    default=None,
)
coreconfigitem('ui', 'paginate',
    default=True,
)
coreconfigitem('ui', 'patch',
    default=None,
)
coreconfigitem('ui', 'portablefilenames',
    default='warn',
)
coreconfigitem('ui', 'promptecho',
    default=False,
)
coreconfigitem('ui', 'quiet',
    default=False,
)
coreconfigitem('ui', 'quietbookmarkmove',
    default=False,
)
coreconfigitem('ui', 'remotecmd',
    default='hg',
)
coreconfigitem('ui', 'report_untrusted',
    default=True,
)
coreconfigitem('ui', 'rollback',
    default=True,
)
coreconfigitem('ui', 'signal-safe-lock',
    default=True,
)
coreconfigitem('ui', 'slash',
    default=False,
)
coreconfigitem('ui', 'ssh',
    default='ssh',
)
coreconfigitem('ui', 'ssherrorhint',
1193 default=None,
1196 default=None,
1194 )
1197 )
1195 coreconfigitem('ui', 'statuscopies',
1198 coreconfigitem('ui', 'statuscopies',
1196 default=False,
1199 default=False,
1197 )
1200 )
1198 coreconfigitem('ui', 'strict',
1201 coreconfigitem('ui', 'strict',
1199 default=False,
1202 default=False,
1200 )
1203 )
1201 coreconfigitem('ui', 'style',
1204 coreconfigitem('ui', 'style',
1202 default='',
1205 default='',
1203 )
1206 )
1204 coreconfigitem('ui', 'supportcontact',
1207 coreconfigitem('ui', 'supportcontact',
1205 default=None,
1208 default=None,
1206 )
1209 )
1207 coreconfigitem('ui', 'textwidth',
1210 coreconfigitem('ui', 'textwidth',
1208 default=78,
1211 default=78,
1209 )
1212 )
1210 coreconfigitem('ui', 'timeout',
1213 coreconfigitem('ui', 'timeout',
1211 default='600',
1214 default='600',
1212 )
1215 )
1213 coreconfigitem('ui', 'timeout.warn',
1216 coreconfigitem('ui', 'timeout.warn',
1214 default=0,
1217 default=0,
1215 )
1218 )
1216 coreconfigitem('ui', 'traceback',
1219 coreconfigitem('ui', 'traceback',
1217 default=False,
1220 default=False,
1218 )
1221 )
1219 coreconfigitem('ui', 'tweakdefaults',
1222 coreconfigitem('ui', 'tweakdefaults',
1220 default=False,
1223 default=False,
1221 )
1224 )
1222 coreconfigitem('ui', 'username',
1225 coreconfigitem('ui', 'username',
1223 alias=[('ui', 'user')]
1226 alias=[('ui', 'user')]
1224 )
1227 )
1225 coreconfigitem('ui', 'verbose',
1228 coreconfigitem('ui', 'verbose',
1226 default=False,
1229 default=False,
1227 )
1230 )
1228 coreconfigitem('verify', 'skipflags',
1231 coreconfigitem('verify', 'skipflags',
1229 default=None,
1232 default=None,
1230 )
1233 )
1231 coreconfigitem('web', 'allowbz2',
1234 coreconfigitem('web', 'allowbz2',
1232 default=False,
1235 default=False,
1233 )
1236 )
1234 coreconfigitem('web', 'allowgz',
1237 coreconfigitem('web', 'allowgz',
1235 default=False,
1238 default=False,
1236 )
1239 )
1237 coreconfigitem('web', 'allow-pull',
1240 coreconfigitem('web', 'allow-pull',
1238 alias=[('web', 'allowpull')],
1241 alias=[('web', 'allowpull')],
1239 default=True,
1242 default=True,
1240 )
1243 )
1241 coreconfigitem('web', 'allow-push',
1244 coreconfigitem('web', 'allow-push',
1242 alias=[('web', 'allow_push')],
1245 alias=[('web', 'allow_push')],
1243 default=list,
1246 default=list,
1244 )
1247 )
1245 coreconfigitem('web', 'allowzip',
1248 coreconfigitem('web', 'allowzip',
1246 default=False,
1249 default=False,
1247 )
1250 )
1248 coreconfigitem('web', 'archivesubrepos',
1251 coreconfigitem('web', 'archivesubrepos',
1249 default=False,
1252 default=False,
1250 )
1253 )
1251 coreconfigitem('web', 'cache',
1254 coreconfigitem('web', 'cache',
1252 default=True,
1255 default=True,
1253 )
1256 )
1254 coreconfigitem('web', 'contact',
1257 coreconfigitem('web', 'contact',
1255 default=None,
1258 default=None,
1256 )
1259 )
1257 coreconfigitem('web', 'deny_push',
1260 coreconfigitem('web', 'deny_push',
1258 default=list,
1261 default=list,
1259 )
1262 )
1260 coreconfigitem('web', 'guessmime',
1263 coreconfigitem('web', 'guessmime',
1261 default=False,
1264 default=False,
1262 )
1265 )
1263 coreconfigitem('web', 'hidden',
1266 coreconfigitem('web', 'hidden',
1264 default=False,
1267 default=False,
1265 )
1268 )
1266 coreconfigitem('web', 'labels',
1269 coreconfigitem('web', 'labels',
1267 default=list,
1270 default=list,
1268 )
1271 )
1269 coreconfigitem('web', 'logoimg',
1272 coreconfigitem('web', 'logoimg',
1270 default='hglogo.png',
1273 default='hglogo.png',
1271 )
1274 )
1272 coreconfigitem('web', 'logourl',
1275 coreconfigitem('web', 'logourl',
1273 default='https://mercurial-scm.org/',
1276 default='https://mercurial-scm.org/',
1274 )
1277 )
1275 coreconfigitem('web', 'accesslog',
1278 coreconfigitem('web', 'accesslog',
1276 default='-',
1279 default='-',
1277 )
1280 )
1278 coreconfigitem('web', 'address',
1281 coreconfigitem('web', 'address',
1279 default='',
1282 default='',
1280 )
1283 )
1281 coreconfigitem('web', 'allow-archive',
1284 coreconfigitem('web', 'allow-archive',
1282 alias=[('web', 'allow_archive')],
1285 alias=[('web', 'allow_archive')],
1283 default=list,
1286 default=list,
1284 )
1287 )
1285 coreconfigitem('web', 'allow_read',
1288 coreconfigitem('web', 'allow_read',
1286 default=list,
1289 default=list,
1287 )
1290 )
1288 coreconfigitem('web', 'baseurl',
1291 coreconfigitem('web', 'baseurl',
1289 default=None,
1292 default=None,
1290 )
1293 )
1291 coreconfigitem('web', 'cacerts',
1294 coreconfigitem('web', 'cacerts',
1292 default=None,
1295 default=None,
1293 )
1296 )
1294 coreconfigitem('web', 'certificate',
1297 coreconfigitem('web', 'certificate',
1295 default=None,
1298 default=None,
1296 )
1299 )
1297 coreconfigitem('web', 'collapse',
1300 coreconfigitem('web', 'collapse',
1298 default=False,
1301 default=False,
1299 )
1302 )
1300 coreconfigitem('web', 'csp',
1303 coreconfigitem('web', 'csp',
1301 default=None,
1304 default=None,
1302 )
1305 )
1303 coreconfigitem('web', 'deny_read',
1306 coreconfigitem('web', 'deny_read',
1304 default=list,
1307 default=list,
1305 )
1308 )
1306 coreconfigitem('web', 'descend',
1309 coreconfigitem('web', 'descend',
1307 default=True,
1310 default=True,
1308 )
1311 )
1309 coreconfigitem('web', 'description',
1312 coreconfigitem('web', 'description',
1310 default="",
1313 default="",
1311 )
1314 )
1312 coreconfigitem('web', 'encoding',
1315 coreconfigitem('web', 'encoding',
1313 default=lambda: encoding.encoding,
1316 default=lambda: encoding.encoding,
1314 )
1317 )
1315 coreconfigitem('web', 'errorlog',
1318 coreconfigitem('web', 'errorlog',
1316 default='-',
1319 default='-',
1317 )
1320 )
1318 coreconfigitem('web', 'ipv6',
1321 coreconfigitem('web', 'ipv6',
1319 default=False,
1322 default=False,
1320 )
1323 )
1321 coreconfigitem('web', 'maxchanges',
1324 coreconfigitem('web', 'maxchanges',
1322 default=10,
1325 default=10,
1323 )
1326 )
1324 coreconfigitem('web', 'maxfiles',
1327 coreconfigitem('web', 'maxfiles',
1325 default=10,
1328 default=10,
1326 )
1329 )
1327 coreconfigitem('web', 'maxshortchanges',
1330 coreconfigitem('web', 'maxshortchanges',
1328 default=60,
1331 default=60,
1329 )
1332 )
1330 coreconfigitem('web', 'motd',
1333 coreconfigitem('web', 'motd',
1331 default='',
1334 default='',
1332 )
1335 )
1333 coreconfigitem('web', 'name',
1336 coreconfigitem('web', 'name',
1334 default=dynamicdefault,
1337 default=dynamicdefault,
1335 )
1338 )
1336 coreconfigitem('web', 'port',
1339 coreconfigitem('web', 'port',
1337 default=8000,
1340 default=8000,
1338 )
1341 )
1339 coreconfigitem('web', 'prefix',
1342 coreconfigitem('web', 'prefix',
1340 default='',
1343 default='',
1341 )
1344 )
1342 coreconfigitem('web', 'push_ssl',
1345 coreconfigitem('web', 'push_ssl',
1343 default=True,
1346 default=True,
1344 )
1347 )
1345 coreconfigitem('web', 'refreshinterval',
1348 coreconfigitem('web', 'refreshinterval',
1346 default=20,
1349 default=20,
1347 )
1350 )
1348 coreconfigitem('web', 'server-header',
1351 coreconfigitem('web', 'server-header',
1349 default=None,
1352 default=None,
1350 )
1353 )
1351 coreconfigitem('web', 'static',
1354 coreconfigitem('web', 'static',
1352 default=None,
1355 default=None,
1353 )
1356 )
1354 coreconfigitem('web', 'staticurl',
1357 coreconfigitem('web', 'staticurl',
1355 default=None,
1358 default=None,
1356 )
1359 )
1357 coreconfigitem('web', 'stripes',
1360 coreconfigitem('web', 'stripes',
1358 default=1,
1361 default=1,
1359 )
1362 )
1360 coreconfigitem('web', 'style',
1363 coreconfigitem('web', 'style',
1361 default='paper',
1364 default='paper',
1362 )
1365 )
1363 coreconfigitem('web', 'templates',
1366 coreconfigitem('web', 'templates',
1364 default=None,
1367 default=None,
1365 )
1368 )
1366 coreconfigitem('web', 'view',
1369 coreconfigitem('web', 'view',
1367 default='served',
1370 default='served',
1368 )
1371 )
1369 coreconfigitem('worker', 'backgroundclose',
1372 coreconfigitem('worker', 'backgroundclose',
1370 default=dynamicdefault,
1373 default=dynamicdefault,
1371 )
1374 )
1372 # Windows defaults to a limit of 512 open files. A buffer of 128
1375 # Windows defaults to a limit of 512 open files. A buffer of 128
1373 # should give us enough headway.
1376 # should give us enough headway.
1374 coreconfigitem('worker', 'backgroundclosemaxqueue',
1377 coreconfigitem('worker', 'backgroundclosemaxqueue',
1375 default=384,
1378 default=384,
1376 )
1379 )
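# (Illustrative note, not in the original file: a queue limit of 384 plus
# 128 descriptors of headroom accounts for the 512 open-file default
# mentioned above.)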
coreconfigitem('worker', 'backgroundcloseminfilecount',
    default=2048,
)
coreconfigitem('worker', 'backgroundclosethreadcount',
    default=4,
)
coreconfigitem('worker', 'enabled',
    default=True,
)
coreconfigitem('worker', 'numcpus',
    default=None,
)

# Rebase related configuration moved to core because other extensions are
# doing strange things. For example, shelve imports the extension to reuse
# some bits without formally loading it.
coreconfigitem('commands', 'rebase.requiredest',
    default=False,
)
coreconfigitem('experimental', 'rebaseskipobsolete',
    default=True,
)
coreconfigitem('rebase', 'singletransaction',
    default=False,
)
coreconfigitem('rebase', 'experimental.inmemory',
    default=False,
)
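# Example (illustrative, not part of the original file): once declared
# above, an item's default is returned by the normal typed config
# accessors, e.g.:
#
#   repo.ui.configbool('rebase', 'singletransaction')        # False unless set
#   repo.ui.configint('worker', 'backgroundclosemaxqueue')   # 384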
@@ -1,3002 +1,3025
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on the repo is done for logic that should be
    unfiltered.
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

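# Example (illustrative): repofilecache and storecache are used as property
# decorators on the repository type so a value is recomputed only when the
# named file changes, e.g.:
#
#   @storecache('00changelog.i')
#   def changelog(self):
#       return changelog.changelog(self.svfs)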
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

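# Example (illustrative): a mutating method is decorated so it always
# operates on the unfiltered repository, bypassing repoview filtering:
#
#   @unfilteredmethod
#   def destroyed(self):
#       ...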
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

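# Example (illustrative): callers drive the executor through the peer
# interface; for local peers the returned future resolves synchronously:
#
#   with peer.commandexecutor() as e:
#       f = e.callcommand(b'heads', {})
#   heads = f.result()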
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

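# Example (illustrative): an extension advertises support for a custom
# requirement by registering a feature setup function, e.g. from its
# uisetup():
#
#   def featuresetup(ui, supported):
#       supported.add(b'my-custom-requirement')
#
#   localrepo.featuresetupfuncs.add(featuresetup)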
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        # Run this before extensions.loadall() so extensions can be
        # automatically enabled.
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
    except IOError:
        pass
    else:
        extensions.loadall(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we
    # will not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        features=features,
        intents=intents)

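# Example (illustrative): opening a repository through this factory from
# Python, assuming a valid repository exists at the given path:
#
#   from mercurial import ui as uimod
#   repo = makelocalrepository(uimod.ui.load(), b'/path/to/repo')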
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported

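# Example (illustrative): with the zstd compression engine available, the
# loop above derives and adds the requirement b'exp-compression-zstd'.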
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError`` if
    any requirement in that set is not recognized by currently loaded code.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

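# Example (illustrative): a repository whose .hg/requires names a feature
# like b'fancy-future-format' that no loaded code supports fails here with
# a RequirementError pointing at the MissingRequirement wiki page.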
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

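# Illustrative sketch (editorial addition): how the requirements select the
# store type in ``makestore`` above; ``path`` and ``vfstype`` are
# placeholders.
#
#   makestore({b'store', b'fncache', b'dotencode'}, path, vfstype)
#     -> storemod.fncachestore (dotencode enabled)
#   makestore({b'store'}, path, vfstype)
#     -> storemod.encodedstore
#   makestore(set(), path, vfstype)
#     -> storemod.basicstore (legacy flat layout)
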
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))

    return options

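# Illustrative sketch (editorial addition): for a repository with typical
# modern requirements, the resolved options dict might contain something like
# the following (exact contents depend on config and on the revlog-specific
# resolution below).
#
#   resolvestorevfsoptions(ui, {b'revlogv1', b'generaldelta'}, set())
#     -> {b'revlogv1': True, b'generaldelta': True,
#         b'deltabothparents': ..., b'with-sparse-read': ..., ...}
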
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        if r.startswith(b'exp-compression-'):
            options[b'compengine'] = r[len(b'exp-compression-'):]

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options

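# Editorial note: the ``exp-compression-*`` loop above is the inverse of the
# requirement derivation performed when gathering supported requirements
# earlier in this module. For example, a repository whose ``.hg/requires``
# lists ``exp-compression-zstd`` resolves to:
#
#   b'exp-compression-zstd'[len(b'exp-compression-'):]  ->  b'zstd'
#
# which the storage layer then looks up in ``util.compengines``.
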
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

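# Illustrative sketch (editorial addition): ``makefilestorage`` chooses the
# file-storage mixin from the requirements.
#
#   makefilestorage({repository.NARROW_REQUIREMENT}, features=set())
#     -> revlognarrowfilestorage  (filelogs filtered by narrowmatch())
#   makefilestorage(set(), features=set())
#     -> revlogfilestorage
#
# Both calls also record REPO_FEATURE_REVLOG_FILE_STORAGE in ``features``.
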
# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]

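# Illustrative sketch (editorial addition): a simplified picture of how
# ``makelocalrepository()`` is expected to consume this list. The real
# function passes considerably more state to each factory; the argument list
# here is hypothetical.
#
#   bases = []
#   for iface, factoryfn in REPO_INTERFACES:
#       typ = factoryfn()(requirements=requirements, features=features)
#       bases.append(typ)
#   cls = type(r'derivedrepo', tuple(bases), {})
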
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator).
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

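    # Illustrative sketch (editorial addition): as the class docstring notes,
    # callers should not instantiate this class directly. The usual entry
    # point is ``hg.repository()``; the path below is a placeholder.
    #
    #   from mercurial import hg, ui as uimod
    #   repo = hg.repository(uimod.ui.load(), path=b'/path/to/repo')
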
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

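    # Illustrative sketch (editorial addition): assuming the working copy
    # declares a subrepository at ``sub``, ``_checknested`` behaves roughly
    # as follows (paths are placeholders):
    #
    #   repo._checknested(repo.root + b'/sub')    -> True
    #   repo._checknested(repo.root + b'/sub/x')  -> delegated to the subrepo
    #   repo._checknested(repo.root + b'/other')  -> False
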
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
                return context.changectx(self, rev, node)
            elif changeid == 'null':
                node = nullid
                rev = nullrev
                return context.changectx(self, rev, node)
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
                return context.changectx(self, rev, node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
                return context.changectx(self, rev, node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                    return context.changectx(self, rev, node)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message

            elif len(changeid) == 40:
                try:
                    node = bin(changeid)
                    rev = self.changelog.rev(node)
                    return context.changectx(self, rev, node)
                except error.FilteredLookupError:
                    raise
                except LookupError:
                    pass
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except IndexError:
            pass
        except error.WdirUnsupported:
            return context.workingctx(self)
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

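    # Illustrative sketch (editorial addition): the lookup forms accepted by
    # ``__getitem__`` above.
    #
    #   repo[None]    -> workingctx for the working directory
    #   repo[0]       -> changectx for revision 0
    #   repo['tip']   -> changectx for the tip changeset
    #   repo['null']  -> changectx for the null revision
    #   repo['.']     -> changectx for the first working directory parent
    #   repo[node]    -> changectx for a 20-byte binary node
    #   repo[hexnode] -> changectx for a 40-character hex node
    #   repo[0:2]     -> list of changectxs, skipping filtered revisions
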
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

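    # Illustrative sketch (editorial addition): ``revs()`` with %-formatting,
    # per ``revsetlang.formatspec`` (revset expressions are placeholders).
    #
    #   repo.revs(b'ancestors(%d)', 42)        # %d escapes an integer rev
    #   repo.revs(b'branch(%s)', b'default')   # %s escapes a string
    #   for ctx in repo.set(b'branch(%s)', b'default'):
    #       ...  # set() yields changectx instances for the same revsets
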
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

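    # Illustrative sketch (editorial addition): shape of the two dicts
    # returned by ``_findtags()`` (names in local encoding, nodes binary).
    #
    #   tags     = {'tip': <node>, 'v1.0': <node>, ...}
    #   tagtypes = {'v1.0': 'global', 'scratch-tag': 'local', ...}
    #
    # Note that 'tip' is synthesized from the changelog and therefore does
    # not appear in ``tagtypes``.
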
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

1470 def nodebookmarks(self, node):
1470 def nodebookmarks(self, node):
1471 """return the list of bookmarks pointing to the specified node"""
1471 """return the list of bookmarks pointing to the specified node"""
1472 return self._bookmarks.names(node)
1472 return self._bookmarks.names(node)
1473
1473
1474 def branchmap(self):
1474 def branchmap(self):
1475 '''returns a dictionary {branch: [branchheads]} with branchheads
1475 '''returns a dictionary {branch: [branchheads]} with branchheads
1476 ordered by increasing revision number'''
1476 ordered by increasing revision number'''
1477 branchmap.updatecache(self)
1477 branchmap.updatecache(self)
1478 return self._branchcaches[self.filtername]
1478 return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass
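
    # A minimal usage sketch (illustrative, not part of this module): with
    # ignoremissing=True the KeyError is swallowed and the method falls off
    # the end, so callers get None instead of a RepoLookupError:
    #
    #     tip = repo.branchtip('default')             # tip node of the branch
    #     tip = repo.branchtip('no-such-branch',
    #                          ignoremissing=True)    # returns None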

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)
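
    # For illustration: a repository becomes non-publishing when its
    # configuration (trusted or not, per the untrusted=True read above)
    # contains:
    #
    #     [phases]
    #     publish = False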

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it itself, as
                # it requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter
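
    # An illustrative sketch (the filter and hook names are assumptions) of
    # how an extension can register a data filter and route [encode] patterns
    # to it; _loadfilter() above matches each configured command against
    # registered filter names via startswith():
    #
    #     def upperfilter(s, cmd, **kwargs):
    #         # keyword-aware filter; also receives ui, repo and filename
    #         return s.upper()
    #
    #     def reposetup(ui, repo):
    #         repo.adddatafilter('upper:', upperfilter)
    #
    #     # with the matching configuration:
    #     #   [encode]
    #     #   *.txt = upper: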

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (e.g. phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature, so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
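        #
        # An illustrative sketch (not part of this module; the helper name
        # and path handling are assumptions) of how a txnclose hook could
        # consume that file:
        #
        #     import os
        #
        #     def readtagmoves(reporoot):
        #         # parse tags.changes into (action, hex-node, tag-name)
        #         path = os.path.join(reporoot, '.hg', 'changes',
        #                             'tags.changes')
        #         moves = []
        #         with open(path, 'rb') as fp:
        #             for line in fp:
        #                 action, hexnode, tag = \
        #                     line.rstrip('\n').split(' ', 2)
        #                 moves.append((action, hexnode, tag))
        #         return moves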
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here; as we do it only once,
                # building sets would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks).
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
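
    # A minimal usage sketch (illustrative): a transaction must be opened
    # while the store lock is held, and both objects work as context
    # managers, so callers typically do:
    #
    #     with repo.lock():
    #         with repo.transaction('my-operation') as tr:
    #             ...  # mutate the store; an exception aborts 'tr'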

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already-restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater
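
    # An illustrative extension sketch (function names here are assumptions):
    # because the updater is built on the repository, an extension can wrap
    # this method to warm its own caches after each transaction:
    #
    #     from mercurial import extensions, localrepo
    #
    #     def wrapupdater(orig, repo, newtransaction):
    #         updater = orig(repo, newtransaction)
    #         def extupdater(tr):
    #             updater(tr)
    #             # warm extension-specific caches here
    #         return extupdater
    #
    #     def uisetup(ui):
    #         extensions.wrapfunction(localrepo.localrepository,
    #                                 '_buildcacheupdater', wrapupdater)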

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid, but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others.
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough
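
    # For example, `hg debugupdatecaches` warms every cache eagerly, which
    # (roughly, and assuming a `repo` handle) amounts to:
    #
    #     with repo.wlock(), repo.lock():
    #         repo.updatecaches(full=True)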

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
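
    # A minimal sketch (illustrative): work that must only happen once every
    # lock is released, such as notifying an external service, can be queued
    # here; if no lock is currently held, the callback fires immediately:
    #
    #     def notify():
    #         repo.ui.status('transaction is now visible to other readers\n')
    #
    #     repo._afterlock(notify)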

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
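
    # A minimal sketch (illustrative) of the acquisition order documented
    # above when both locks are needed: take 'wlock' before 'lock', never
    # the reverse:
    #
    #     with repo.wlock():      # non-store parts of .hg
    #         with repo.lock():   # .hg/store
    #             pass            # safe to modify both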

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
2316
2316
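    # Illustrative note (not part of this changeset): the copy metadata
    # written above travels with the new filelog revision. A rename recorded
    # by _filecommit() is, in effect, stored as:
    #
    #     meta = {"copy": cfname, "copyrev": hex(crev)}  # source path + node
    #     fparent1, fparent2 = nullid, newfparent        # copy data acts as p1
    #
    # so later readers recover the rename by asking the file context for it,
    # e.g. fctx.renamed() returning (source, node) rather than False.
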
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may already have been stripped by the
            # time the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

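    # Illustrative usage sketch (not part of this changeset), assuming a
    # repository object "repo" obtained via hg.repository(ui, path):
    #
    #     node = repo.commit(text="fix frobnicator",
    #                        user="alice <alice@example.com>")
    #     if node is None:
    #         ui.status("nothing changed\n")   # empty commit was refused
    #
    # "node" is the binary changelog node of the new revision, or None when
    # ui.allowemptycommit is unset and no files changed.
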
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets; if a parent already has a higher phase, the
                # resulting phase will be compliant anyway
                #
                # if the minimal phase is 0, we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

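    # Illustrative sketch (not part of this changeset): commitctx() is the
    # low-level entry point used by commit() above, but it also accepts
    # in-memory contexts, e.g. a hypothetical import script:
    #
    #     mctx = context.memctx(repo, (p1, p2), "import snapshot",
    #                           ["data.txt"], getfilectx,
    #                           user="importer", date=None)
    #     node = repo.commitctx(mctx)
    #
    # where getfilectx(repo, memctx, path) returns a memfilectx for each
    # listed path.
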
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure it adds to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

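    # Illustrative sketch (not part of this changeset): an extension that
    # wants to observe status fixups could register a callback each time it
    # is about to trigger a dirstate status, roughly:
    #
    #     def _poststatus(wctx, status):
    #         wctx.repo().ui.debug('%d modified files\n'
    #                              % len(status.modified))
    #
    #     repo.addpostdsstatus(_poststatus)
    #
    # The callback list is drained after each status run, so registration
    # must be repeated before every dirstate.status call.
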
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

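    # Illustrative note (not part of this changeset): between() samples the
    # first-parent chain from "top" down toward "bottom" at exponentially
    # growing distances (1, 2, 4, 8, ...), which is how the legacy wire
    # protocol bisects to find common ancestors. For a linear history
    # 10 -> 9 -> ... -> 0, between([(node(10), node(0))]) collects the nodes
    # one, two, four and eight steps below the top.
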
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called before pushing
        changesets, with a pushop exposing repo, remote and outgoing
        attributes.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

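    # Illustrative sketch (not part of this changeset): pushkey namespaces
    # include 'phases' and 'bookmarks'. A bookmark move arriving over the
    # wire ultimately lands here as something like:
    #
    #     ok = repo.pushkey('bookmarks', 'feature',
    #                       old=hex(oldnode), new=hex(newnode))
    #
    # returning False when a prepushkey hook aborts the update.
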
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if 'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts['backend'] = ui.config('storage', 'new-repo-backend')

    return createopts

def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    if 'backend' not in createopts:
        raise error.ProgrammingError('backend key not present in createopts; '
                                     'was defaultcreateopts() called?')

    if createopts['backend'] != 'revlogv1':
        raise error.Abort(_('unable to determine repository requirements for '
                            'storage backend: %s') % createopts['backend'])

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    return requirements

def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'backend',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
    }

    return {k: v for k, v in createopts.items() if k not in known}

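# Illustrative sketch (not part of this changeset): how the new "backend"
# creation option flows through this API. With nothing configured,
# storage.new-repo-backend resolves to the default backend name:
#
#     createopts = defaultcreateopts(ui)           # {'backend': 'revlogv1'}
#     reqs = newreporequirements(ui, createopts)   # {'revlogv1', 'store', ...}
#
# A hypothetical extension providing an alternate store would wrap
# newreporequirements() to recognize its own backend name instead of
# hitting the error.Abort above.
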
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

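# Illustrative usage sketch (not part of this changeset):
#
#     createrepository(ui, b'/tmp/newrepo',
#                      createopts={'narrowfiles': True})
#
# Unrecognized keys abort; e.g. passing {'frobnicate': True} (a made-up
# option) raises "unable to create repository because of unknown creation
# option: frobnicate" unless an extension filters it out.
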
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
@@ -1,895 +1,897
# upgrade.py - functions for in place upgrade of Mercurial repository
#
# Copyright (c) 2016-present, Gregory Szorc
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import stat

from .i18n import _
from . import (
    changelog,
    error,
    filelog,
    hg,
    localrepo,
    manifest,
    pycompat,
    revlog,
    scmutil,
    util,
    vfs as vfsmod,
)

def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    return {
        # Introduced in Mercurial 0.9.2.
        'revlogv1',
        # Introduced in Mercurial 0.9.2.
        'store',
    }

def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains any
    requirement in the returned set.
    """
    return {
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        'treemanifest',
        # This was a precursor to generaldelta and was never enabled by default.
        # It should (hopefully) not exist in the wild.
        'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        'shared',
    }

57 def supportremovedrequirements(repo):
57 def supportremovedrequirements(repo):
58 """Obtain requirements that can be removed during an upgrade.
58 """Obtain requirements that can be removed during an upgrade.
59
59
60 If an upgrade were to create a repository that dropped a requirement,
60 If an upgrade were to create a repository that dropped a requirement,
61 the dropped requirement must appear in the returned set for the upgrade
61 the dropped requirement must appear in the returned set for the upgrade
62 to be allowed.
62 to be allowed.
63 """
63 """
64 return {
64 return {
65 localrepo.SPARSEREVLOG_REQUIREMENT,
65 localrepo.SPARSEREVLOG_REQUIREMENT,
66 }
66 }
67
67
68 def supporteddestrequirements(repo):
68 def supporteddestrequirements(repo):
69 """Obtain requirements that upgrade supports in the destination.
69 """Obtain requirements that upgrade supports in the destination.
70
70
71 If the result of the upgrade would create requirements not in this set,
71 If the result of the upgrade would create requirements not in this set,
72 the upgrade is disallowed.
72 the upgrade is disallowed.
73
73
74 Extensions should monkeypatch this to add their custom requirements.
74 Extensions should monkeypatch this to add their custom requirements.
75 """
75 """
76 return {
76 return {
77 'dotencode',
77 'dotencode',
78 'fncache',
78 'fncache',
79 'generaldelta',
79 'generaldelta',
80 'revlogv1',
80 'revlogv1',
81 'store',
81 'store',
82 localrepo.SPARSEREVLOG_REQUIREMENT,
82 localrepo.SPARSEREVLOG_REQUIREMENT,
83 }
83 }

def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    return {
        'dotencode',
        'fncache',
        'generaldelta',
        localrepo.SPARSEREVLOG_REQUIREMENT,
    }

def preservedrequirements(repo):
    return set()
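
# preservedrequirements() is empty by default; upgraderepo() below folds its
# result into the computed destination requirements, so extensions can wrap
# it to carry additional requirements over unchanged.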

deficiency = 'deficiency'
optimisation = 'optimization'

class improvement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``deficiency`` types, should be
       worded in the present tense. For ``optimisation`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.
    """
    def __init__(self, name, type, description, upgrademessage):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

    def __eq__(self, other):
        if not isinstance(other, improvement):
            # This is what Python tells us to do.
            return NotImplemented
        return self.name == other.name

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.name)
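
# Equality and hashing are by name only, so improvements can live in sets
# and an optimization is skipped when determineactions() already scheduled
# an action with the same name.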

allformatvariant = []

def registerformatvariant(cls):
    allformatvariant.append(cls)
    return cls
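
# Registration order is significant: finddeficiencies() walks
# allformatvariant in order, so variants are reported in the order the
# classes below are defined.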

class formatvariant(improvement):
    """an improvement subclass dedicated to repository format"""
    type = deficiency
    ### The following attributes should be defined for each class:

    # machine-readable string uniquely identifying this improvement. it will be
    # mapped to an action later in the upgrade process.
    name = None

    # message intended for humans explaining the improvement in more detail,
    # including the implications of it. For ``deficiency`` types, should be
    # worded in the present tense.
    description = None

    # message intended for humans explaining what an upgrade addressing this
    # issue will do. should be worded in the future tense.
    upgrademessage = None

    # value of current Mercurial default for new repository
    default = None

    def __init__(self):
        raise NotImplementedError()

    @staticmethod
    def fromrepo(repo):
        """current value of the variant in the repository"""
        raise NotImplementedError()

    @staticmethod
    def fromconfig(repo):
        """current value of the variant in the configuration"""
        raise NotImplementedError()

class requirementformatvariant(formatvariant):
    """formatvariant based on a 'requirement' name.

    Many format variants are controlled by a 'requirement'. We define a small
    subclass to factor out the common code.
    """

    # the requirement that controls this format variant
    _requirement = None

    @staticmethod
    def _newreporequirements(ui):
        return localrepo.newreporequirements(
            ui, localrepo.defaultcreateopts(ui))

    @classmethod
    def fromrepo(cls, repo):
        assert cls._requirement is not None
        return cls._requirement in repo.requirements

    @classmethod
    def fromconfig(cls, repo):
        assert cls._requirement is not None
        return cls._requirement in cls._newreporequirements(repo.ui)
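
# A new requirement-backed variant is then just a matter of filling in the
# class attributes. Illustrative sketch only; 'myfeature' is a made-up
# requirement name:
#
#   @registerformatvariant
#   class myfeature(requirementformatvariant):
#       name = 'myfeature'
#       _requirement = 'myfeature'
#       default = False
#       description = _('repository lacks myfeature')
#       upgrademessage = _('repository will gain myfeature')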

@registerformatvariant
class fncache(requirementformatvariant):
    name = 'fncache'

    _requirement = 'fncache'

    default = True

    description = _('long and reserved filenames may not work correctly; '
                    'repository performance is sub-optimal')

    upgrademessage = _('repository will be more resilient to storing '
                       'certain paths and performance of certain '
                       'operations should be improved')

@registerformatvariant
class dotencode(requirementformatvariant):
    name = 'dotencode'

    _requirement = 'dotencode'

    default = True

    description = _('storage of filenames beginning with a period or '
                    'space may not work correctly')

    upgrademessage = _('repository will be better able to store files '
                       'beginning with a space or period')

@registerformatvariant
class generaldelta(requirementformatvariant):
    name = 'generaldelta'

    _requirement = 'generaldelta'

    default = True

    description = _('deltas within internal storage are unable to '
                    'choose optimal revisions; repository is larger and '
                    'slower than it could be; interaction with other '
                    'repositories may require extra network and CPU '
                    'resources, making "hg push" and "hg pull" slower')

    upgrademessage = _('repository storage will be able to create '
                       'optimal deltas; new repository data will be '
                       'smaller and read times should decrease; '
                       'interacting with other repositories using this '
                       'storage model should require less network and '
                       'CPU resources, making "hg push" and "hg pull" '
                       'faster')

@registerformatvariant
class sparserevlog(requirementformatvariant):
    name = 'sparserevlog'

    _requirement = localrepo.SPARSEREVLOG_REQUIREMENT

    default = False

    description = _('in order to limit disk reading and memory usage on older '
                    'versions, the span of a delta chain from its root to its '
                    'end is limited, regardless of the relevant data in this '
                    'span. This can severely limit Mercurial\'s ability to '
                    'build good chains of deltas, resulting in much more '
                    'storage space being taken and limiting the reusability '
                    'of on-disk deltas during exchange.'
                    )

    upgrademessage = _('Revlog supports delta chains with more unused data '
                       'between payloads. These gaps will be skipped at read '
                       'time. This allows for better delta chains, making '
                       'better compression and faster exchange with the '
                       'server.')

@registerformatvariant
class removecldeltachain(formatvariant):
    name = 'plain-cl-delta'

    default = True

    description = _('changelog storage is using deltas instead of '
                    'raw entries; changelog reading and any '
                    'operation relying on changelog data are slower '
                    'than they could be')

    upgrademessage = _('changelog storage will be reformatted to '
                       'store raw entries; changelog reading will be '
                       'faster; changelog size may be reduced')

    @staticmethod
    def fromrepo(repo):
        # Mercurial 4.0 changed changelogs to not use delta chains. Search for
        # changelogs with deltas.
        cl = repo.changelog
        chainbase = cl.chainbase
        return all(rev == chainbase(rev) for rev in cl)

    @staticmethod
    def fromconfig(repo):
        return True

@registerformatvariant
class compressionengine(formatvariant):
    name = 'compression'
    default = 'zlib'

    description = _('Compression algorithm used to compress data. '
                    'Some engines are faster than others.')

    upgrademessage = _('revlog content will be recompressed with the new '
                       'algorithm.')

    @classmethod
    def fromrepo(cls, repo):
        for req in repo.requirements:
            if req.startswith('exp-compression-'):
                return req.split('-', 2)[2]
        return 'zlib'

    @classmethod
    def fromconfig(cls, repo):
        return repo.ui.config('experimental', 'format.compression')
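
# Non-default engines are recorded in the requirements file as
# 'exp-compression-<engine>'; fromrepo() above recovers the engine name by
# splitting on the first two dashes.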

def finddeficiencies(repo):
    """returns a list of deficiencies that the repo suffers from"""
    deficiencies = []

    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.

    for fv in allformatvariant:
        if not fv.fromrepo(repo):
            deficiencies.append(fv)

    return deficiencies
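
# Note that the list holds the formatvariant classes themselves, not
# instances; the reporting code in upgraderepo() only reads class attributes
# (name, description, default) and classmethods, so this works.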

def findoptimizations(repo):
    """Determine optimisations that could be used during upgrade"""
    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.
    optimizations = []

    optimizations.append(improvement(
        name='redeltaparent',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated to '
                      'choose an optimal base revision where this was not '
                      'already done; the size of the repository may shrink and '
                      'various operations may become faster; the first time '
                      'this optimization is performed could slow down upgrade '
                      'execution considerably; subsequent invocations should '
                      'not run noticeably slower'),
        upgrademessage=_('deltas within internal storage will choose a new '
                         'base revision if needed')))

    optimizations.append(improvement(
        name='redeltamultibase',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated '
                      'against multiple base revisions and the smallest '
                      'difference will be used; the size of the repository may '
                      'shrink significantly when there are many merges; this '
                      'optimization will slow down execution in proportion to '
                      'the number of merges in the repository and the number '
                      'of files in the repository; this slowdown should not '
                      'be significant unless there are tens of thousands of '
                      'files and thousands of merges'),
        upgrademessage=_('deltas within internal storage will choose an '
                         'optimal delta by computing deltas against multiple '
                         'parents; may slow down execution time '
                         'significantly')))

    optimizations.append(improvement(
        name='redeltaall',
        type=optimisation,
        description=_('deltas within internal storage will always be '
                      'recalculated without reusing prior deltas; this will '
                      'likely make execution run several times slower; this '
                      'optimization is typically not needed'),
        upgrademessage=_('deltas within internal storage will be fully '
                         'recomputed; this will likely drastically slow down '
                         'execution time')))

    optimizations.append(improvement(
        name='redeltafulladd',
        type=optimisation,
        description=_('every revision will be re-added as if it was new '
                      'content. It will go through the full storage '
                      'mechanism giving extensions a chance to process it '
                      '(eg. lfs). This is similar to "redeltaall" but even '
                      'slower since more logic is involved.'),
        upgrademessage=_('each revision will be added as new content to the '
                         'internal storage; this will likely drastically slow '
                         'down execution time, but some extensions might need '
                         'it')))

    return optimizations

def determineactions(repo, deficiencies, sourcereqs, destreqs):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``finddeficiencies`` and
    ``findoptimizations``, determine the list of upgrade actions that
    will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    newactions = []

    knownreqs = supporteddestrequirements(repo)

    for d in deficiencies:
        name = d.name

        # If the action is a requirement that doesn't show up in the
        # destination requirements, prune the action.
        if name in knownreqs and name not in destreqs:
            continue

        newactions.append(d)

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions

def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == '00changelog.i':
        return changelog.changelog(repo.svfs)
    elif path.endswith('00manifest.i'):
        mandir = path[:-len('00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, tree=mandir)
    else:
        # reverse of "/".join(("data", path + ".i"))
        return filelog.filelog(repo.svfs, path[5:-2])
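
# For example, '00changelog.i' maps to the changelog, a path ending in
# '00manifest.i' to the corresponding manifest revlog, and 'data/foo.txt.i'
# to the filelog for 'foo.txt' (path[5:-2] strips the 'data/' prefix and the
# '.i' suffix).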

def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, deltabothparents):
    """Copy revlogs between 2 repos."""
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in srcrepo.store.walk():
        if unencoded.endswith('.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)

        info = rl.storageinfo(exclusivefiles=True, revisionscount=True,
                              trackedsize=True, storedsize=True)

        revcount += info['revisionscount'] or 0
        datasize = info['storedsize'] or 0
        rawsize = info['trackedsize'] or 0

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, filelog.filelog):
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize
        else:
            raise error.ProgrammingError('unknown revlog type')

    if not revcount:
        return

    ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
               '%d in changelog)\n') %
             (revcount, frevcount, mrevcount, crevcount))
    ui.write(_('migrating %s in store; %s tracked data\n') % (
             (util.bytecount(srcsize), util.bytecount(srcrawsize))))

    # Used to keep track of progress.
    progress = None
    def oncopiedrevision(rl, rev, node):
        progress.increment()

    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    seen = set()
    for unencoded, encoded, size in srcrepo.store.walk():
        if unencoded.endswith('.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)
        newrl = _revlogfrompath(dstrepo, unencoded)

        if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
            ui.write(_('finished migrating %d manifest revisions across %d '
                       'manifests; change in size: %s\n') %
                     (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))

            ui.write(_('migrating changelog containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (crevcount, util.bytecount(csrcsize),
                      util.bytecount(crawsize)))
            seen.add('c')
            progress = srcrepo.ui.makeprogress(_('changelog revisions'),
                                               total=crevcount)
        elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
            ui.write(_('finished migrating %d filelog revisions across %d '
                       'filelogs; change in size: %s\n') %
                     (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))

            ui.write(_('migrating %d manifests containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (mcount, mrevcount, util.bytecount(msrcsize),
                      util.bytecount(mrawsize)))
            seen.add('m')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(_('manifest revisions'),
                                               total=mrevcount)
        elif 'f' not in seen:
            ui.write(_('migrating %d filelogs containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (fcount, frevcount, util.bytecount(fsrcsize),
                      util.bytecount(frawsize)))
            seen.add('f')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(_('file revisions'),
                                               total=frevcount)

        ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
        oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
                    deltareuse=deltareuse,
                    deltabothparents=deltabothparents)

        info = newrl.storageinfo(storedsize=True)
        datasize = info['storedsize'] or 0

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    progress.complete()

    ui.write(_('finished migrating %d changelog revisions; change in size: '
               '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))

    ui.write(_('finished migrating %d total revisions; total change in store '
               'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))

def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
    """Determine whether to copy a store file during upgrade.

    This function is called when migrating store files from ``srcrepo`` to
    ``dstrepo`` as part of upgrading a repository.

    Args:
      srcrepo: repo we are copying from
      dstrepo: repo we are copying to
      requirements: set of requirements for ``dstrepo``
      path: store file being examined
      mode: the ``ST_MODE`` file type of ``path``
      st: ``stat`` data structure for ``path``

    Function should return ``True`` if the file is to be copied.
    """
    # Skip revlogs.
    if path.endswith(('.i', '.d')):
        return False
    # Skip transaction related files.
    if path.startswith('undo'):
        return False
    # Only copy regular files.
    if mode != stat.S_IFREG:
        return False
    # Skip other skipped files.
    if path in ('lock', 'fncache'):
        return False

    return True
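
# In practice this lets through the remaining regular store files (for
# example 'phaseroots' or an 'obsstore' if present), which _upgraderepo()
# below copies verbatim.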

def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
    """Hook point for extensions to perform additional actions during upgrade.

    This function is called after revlogs and store files have been copied but
    before the new store is swapped into the original location.
    """

def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.
    """
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.write(_('(it is safe to interrupt this process any time before '
               'data migration completes)\n'))

    if 'redeltaall' in actions:
        deltareuse = revlog.revlog.DELTAREUSENEVER
    elif 'redeltaparent' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif 'redeltamultibase' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif 'redeltafulladd' in actions:
        deltareuse = revlog.revlog.DELTAREUSEFULLADD
    else:
        deltareuse = revlog.revlog.DELTAREUSEALWAYS
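
    # Note that 'redeltaparent' and 'redeltamultibase' map to the same
    # delta-reuse policy; the multi-base variant additionally enables
    # deltabothparents in the _copyrevlogs() call below.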

    with dstrepo.transaction('upgrade') as tr:
        _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
                     'redeltamultibase' in actions)

        # Now copy other files in the store directory.
        # The sorted() makes execution deterministic.
        for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
            if not _filterstorefile(srcrepo, dstrepo, requirements,
                                    p, kind, st):
                continue

            srcrepo.ui.write(_('copying %s\n') % p)
            src = srcrepo.store.rawvfs.join(p)
            dst = dstrepo.store.rawvfs.join(p)
            util.copyfile(src, dst, copystat=True)

        _finishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.write(_('data fully migrated to temporary repository\n'))

    backuppath = pycompat.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.write(_('marking source repository as being upgraded; clients will be '
               'unable to read from repository\n'))
    scmutil.writerequires(srcrepo.vfs,
                          srcrepo.requirements | {'upgradeinprogress'})

    ui.write(_('starting in-place swap of repository data\n'))
    ui.write(_('replaced files will be backed up at %s\n') %
             backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.write(_('replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join('store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.write(_('store replacement complete; repository was inconsistent for '
               '%0.1fs\n') % elapsed)

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.write(_('finalizing requirements file and making repository readable '
               'again\n'))
    scmutil.writerequires(srcrepo.vfs, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink('store/lock')

    return backuppath

def upgraderepo(ui, repo, run=False, optimize=None):
    """Upgrade a repository in place."""
    optimize = set(optimize or [])
    repo = repo.unfiltered()

    # Ensure the repository can be upgraded.
    missingreqs = requiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(_('cannot upgrade repository; requirement '
                            'missing: %s') % _(', ').join(sorted(missingreqs)))

    blockedreqs = blocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(_('cannot upgrade repository; unsupported source '
                            'requirement: %s') %
                          _(', ').join(sorted(blockedreqs)))

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(
        repo.ui, localrepo.defaultcreateopts(repo.ui))
    newreqs.update(preservedrequirements(repo))

    noremovereqs = (repo.requirements - newreqs -
                    supportremovedrequirements(repo))
    if noremovereqs:
        raise error.Abort(_('cannot upgrade repository; requirement would be '
                            'removed: %s') % _(', ').join(sorted(noremovereqs)))

    noaddreqs = (newreqs - repo.requirements -
                 allowednewrequirements(repo))
    if noaddreqs:
        raise error.Abort(_('cannot upgrade repository; do not support adding '
                            'requirement: %s') %
                          _(', ').join(sorted(noaddreqs)))

    unsupportedreqs = newreqs - supporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(_('cannot upgrade repository; do not support '
                            'destination requirement: %s') %
                          _(', ').join(sorted(unsupportedreqs)))

    # Find and validate all improvements that can be made.
    alloptimizations = findoptimizations(repo)

    # Apply and validate arguments.
    optimizations = []
    for o in alloptimizations:
        if o.name in optimize:
            optimizations.append(o)
            optimize.discard(o.name)

    if optimize: # anything left is unknown
        raise error.Abort(_('unknown optimization action requested: %s') %
                          ', '.join(sorted(optimize)),
                          hint=_('run without arguments to see valid '
                                 'optimizations'))

    deficiencies = finddeficiencies(repo)
    actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
    actions.extend(o for o in sorted(optimizations)
                   # determineactions could have added optimisation
                   if o not in actions)
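
    # From the command line this is typically reached via
    # 'hg debugupgraderepo [--optimize <name>] [--run]'; without --run the
    # branch below only reports what an upgrade would do.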

    def printrequirements():
        ui.write(_('requirements\n'))
        ui.write(_('   preserved: %s\n') %
                 _(', ').join(sorted(newreqs & repo.requirements)))

        if repo.requirements - newreqs:
            ui.write(_('   removed: %s\n') %
                     _(', ').join(sorted(repo.requirements - newreqs)))

        if newreqs - repo.requirements:
            ui.write(_('   added: %s\n') %
                     _(', ').join(sorted(newreqs - repo.requirements)))

        ui.write('\n')

    def printupgradeactions():
        for a in actions:
            ui.write('%s\n   %s\n\n' % (a.name, a.upgrademessage))

    if not run:
        fromconfig = []
        onlydefault = []

        for d in deficiencies:
            if d.fromconfig(repo):
                fromconfig.append(d)
            elif d.default:
                onlydefault.append(d)

        if fromconfig or onlydefault:

            if fromconfig:
                ui.write(_('repository lacks features recommended by '
                           'current config options:\n\n'))
                for i in fromconfig:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.write(_('repository lacks features used by the default '
                           'config options:\n\n'))
                for i in onlydefault:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            ui.write('\n')
        else:
            ui.write(_('(no feature deficiencies found in existing '
                       'repository)\n'))

        ui.write(_('performing an upgrade with "--run" will make the following '
                   'changes:\n\n'))

        printrequirements()
        printupgradeactions()

        unusedoptimize = [i for i in alloptimizations if i not in actions]

        if unusedoptimize:
            ui.write(_('additional optimizations are available by specifying '
                       '"--optimize <name>":\n\n'))
            for i in unusedoptimize:
                ui.write(_('%s\n   %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_('upgrade will perform the following actions:\n\n'))
    printrequirements()
    printupgradeactions()

    upgradeactions = [a.name for a in actions]

    ui.write(_('beginning upgrade...\n'))
    with repo.wlock(), repo.lock():
        ui.write(_('repository locked and read-only\n'))
        # Our strategy for upgrading the repository is to create a new,
        # temporary repository, write data to it, then do a swap of the
        # data. There are less heavyweight ways to do this, but it is easier
        # to create a new repo object than to instantiate all the components
        # (like the store) separately.
        tmppath = pycompat.mkdtemp(prefix='upgrade.', dir=repo.path)
        backuppath = None
        try:
            ui.write(_('creating temporary repository to stage migrated '
                       'data: %s\n') % tmppath)

            # clone ui without using ui.copy because repo.ui is protected
            repoui = repo.ui.__class__(repo.ui)
            dstrepo = hg.repository(repoui, path=tmppath, create=True)

            with dstrepo.wlock(), dstrepo.lock():
                backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
                                          upgradeactions)

        finally:
            ui.write(_('removing temporary repository %s\n') % tmppath)
            repo.vfs.rmtree(tmppath, forcibly=True)

    if backuppath:
        ui.warn(_('copy of old repository backed up at %s\n') %
                backuppath)
        ui.warn(_('the old repository will not be deleted; remove '
                  'it to free up disk space once the upgraded '
                  'repository is verified\n'))