##// END OF EJS Templates
lock: add internal config to not replace signal handlers while locking...
Yuya Nishihara -
r38157:8c828beb stable
parent child Browse files
Show More
@@ -1,1344 +1,1347 b''
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11 import re
11 import re
12
12
13 from . import (
13 from . import (
14 encoding,
14 encoding,
15 error,
15 error,
16 )
16 )
17
17
def loadconfigtable(ui, extname, configtable):
    """Merge an extension's declared config items into ``ui``'s registry.

    ``configtable`` maps section names to dicts of item declarations.  A
    devel warning is emitted for every item that shadows an already-known
    one, then the extension's items overwrite the existing entries.
    """
    for section, newitems in sorted(configtable.items()):
        registered = ui._knownconfig.setdefault(section, itemregister())
        # warn about collisions before overwriting
        duplicated = set(registered) & set(newitems)
        for key in sorted(duplicated):
            msg = "extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config='warn-config')

        registered.update(newitems)
30
30
class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, match name using regular expression.
    """

    def __init__(self, section, name, default=None, alias=(),
                 generic=False, priority=0):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        # generic items match config names by regex; compile the pattern
        # once up front, non-generic items keep ``_re`` as None
        self._re = re.compile(self.name) if generic else None
52
52
class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        # generic (regex-matching) items, tracked separately so lookups
        # can scan them without walking the whole dict
        self._generics = set()

    def update(self, other):
        # NOTE: ``other`` is expected to be another itemregister, since
        # its ``_generics`` set is merged as well
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        """Return the item registered for ``key``, or None.

        Exact non-generic entries win; otherwise generic items are tried
        in (priority, name) order.
        """
        exact = super(itemregister, self).get(key)
        if exact is not None and not exact.generic:
            return exact

        # No exact entry: scan generic items, lowest (priority, name)
        # first.  'match' (anchored at the string start) is used instead
        # of 'search' to make the matching simpler for people unfamiliar
        # with regular expressions: a pattern such as "color\..*" then
        # never fires on unrelated names that merely contain "color."
        # somewhere.  Requiring ".*" on some patterns is judged less
        # error prone than requiring a leading "^".
        for candidate in sorted(self._generics,
                                key=lambda i: (i.priority, i.name)):
            if candidate._re.match(key):
                return candidate

        return None
92
92
# registry of the core (built-in) config items
coreitems = {}

def _register(configtable, *args, **kwargs):
    """Instantiate a configitem from ``args``/``kwargs`` and file it
    under its section in ``configtable``.

    Registering the same section/name twice raises
    error.ProgrammingError.
    """
    newitem = configitem(*args, **kwargs)
    register = configtable.setdefault(newitem.section, itemregister())
    if newitem.name in register:
        msg = "duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (newitem.section, newitem.name))
    register[newitem.name] = newitem
102
102
# special value for case where the default is derived from other values
dynamicdefault = object()

# Registering actual config items

def getitemregister(configtable):
    """Return a registration function bound to ``configtable``."""
    register = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    register.dynamicdefault = dynamicdefault
    return register

coreconfigitem = getitemregister(coreitems)
115
115
# Declarations of the built-in ("core") configuration items, one per line.
coreconfigitem('alias', '.*', default=dynamicdefault, generic=True)
coreconfigitem('annotate', 'nodates', default=False)
coreconfigitem('annotate', 'showfunc', default=False)
coreconfigitem('annotate', 'unified', default=None)
coreconfigitem('annotate', 'git', default=False)
coreconfigitem('annotate', 'ignorews', default=False)
coreconfigitem('annotate', 'ignorewsamount', default=False)
coreconfigitem('annotate', 'ignoreblanklines', default=False)
coreconfigitem('annotate', 'ignorewseol', default=False)
coreconfigitem('annotate', 'nobinary', default=False)
coreconfigitem('annotate', 'noprefix', default=False)
coreconfigitem('auth', 'cookiefile', default=None)
# bookmarks.pushing: internal hack for discovery
coreconfigitem('bookmarks', 'pushing', default=list)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem('bundle', 'mainreporoot', default='')
# bundle.reorder: experimental config
coreconfigitem('bundle', 'reorder', default='auto')
coreconfigitem('censor', 'policy', default='abort')
coreconfigitem('chgserver', 'idletimeout', default=3600)
coreconfigitem('chgserver', 'skiphash', default=False)
coreconfigitem('cmdserver', 'log', default=None)
coreconfigitem('color', '.*', default=None, generic=True)
coreconfigitem('color', 'mode', default='auto')
coreconfigitem('color', 'pagermode', default=dynamicdefault)
coreconfigitem('commands', 'show.aliasprefix', default=list)
coreconfigitem('commands', 'status.relative', default=False)
coreconfigitem('commands', 'status.skipstates', default=[])
coreconfigitem('commands', 'status.verbose', default=False)
# Deprecated, remove after 4.4 release
coreconfigitem('commands', 'update.check', default=None,
               alias=[('experimental', 'updatecheck')])
coreconfigitem('commands', 'update.requiredest', default=False)
coreconfigitem('committemplate', '.*', default=None, generic=True)
coreconfigitem('convert', 'cvsps.cache', default=True)
coreconfigitem('convert', 'cvsps.fuzz', default=60)
coreconfigitem('convert', 'cvsps.logencoding', default=None)
coreconfigitem('convert', 'cvsps.mergefrom', default=None)
coreconfigitem('convert', 'cvsps.mergeto', default=None)
coreconfigitem('convert', 'git.committeractions',
               default=lambda: ['messagedifferent'])
coreconfigitem('convert', 'git.extrakeys', default=list)
coreconfigitem('convert', 'git.findcopiesharder', default=False)
coreconfigitem('convert', 'git.remoteprefix', default='remote')
coreconfigitem('convert', 'git.renamelimit', default=400)
coreconfigitem('convert', 'git.saverev', default=True)
coreconfigitem('convert', 'git.similarity', default=50)
coreconfigitem('convert', 'git.skipsubmodules', default=False)
coreconfigitem('convert', 'hg.clonebranches', default=False)
coreconfigitem('convert', 'hg.ignoreerrors', default=False)
coreconfigitem('convert', 'hg.revs', default=None)
coreconfigitem('convert', 'hg.saverev', default=False)
coreconfigitem('convert', 'hg.sourcename', default=None)
coreconfigitem('convert', 'hg.startrev', default=None)
coreconfigitem('convert', 'hg.tagsbranch', default='default')
coreconfigitem('convert', 'hg.usebranchnames', default=True)
coreconfigitem('convert', 'ignoreancestorcheck', default=False)
coreconfigitem('convert', 'localtimezone', default=False)
coreconfigitem('convert', 'p4.encoding', default=dynamicdefault)
coreconfigitem('convert', 'p4.startrev', default=0)
coreconfigitem('convert', 'skiptags', default=False)
coreconfigitem('convert', 'svn.debugsvnlog', default=True)
coreconfigitem('convert', 'svn.trunk', default=None)
coreconfigitem('convert', 'svn.tags', default=None)
coreconfigitem('convert', 'svn.branches', default=None)
coreconfigitem('convert', 'svn.startrev', default=0)
coreconfigitem('debug', 'dirstate.delaywrite', default=0)
coreconfigitem('defaults', '.*', default=None, generic=True)
coreconfigitem('devel', 'all-warnings', default=False)
coreconfigitem('devel', 'bundle2.debug', default=False)
coreconfigitem('devel', 'cache-vfs', default=None)
coreconfigitem('devel', 'check-locks', default=False)
coreconfigitem('devel', 'check-relroot', default=False)
coreconfigitem('devel', 'default-date', default=None)
coreconfigitem('devel', 'deprec-warn', default=False)
coreconfigitem('devel', 'disableloaddefaultcerts', default=False)
coreconfigitem('devel', 'warn-empty-changegroup', default=False)
coreconfigitem('devel', 'legacy.exchange', default=list)
coreconfigitem('devel', 'servercafile', default='')
coreconfigitem('devel', 'serverexactprotocol', default='')
coreconfigitem('devel', 'serverrequirecert', default=False)
coreconfigitem('devel', 'strip-obsmarkers', default=True)
coreconfigitem('devel', 'warn-config', default=None)
coreconfigitem('devel', 'warn-config-default', default=None)
coreconfigitem('devel', 'user.obsmarker', default=None)
coreconfigitem('devel', 'warn-config-unknown', default=None)
coreconfigitem('devel', 'debug.peer-request', default=False)
coreconfigitem('diff', 'nodates', default=False)
coreconfigitem('diff', 'showfunc', default=False)
coreconfigitem('diff', 'unified', default=None)
coreconfigitem('diff', 'git', default=False)
coreconfigitem('diff', 'ignorews', default=False)
coreconfigitem('diff', 'ignorewsamount', default=False)
coreconfigitem('diff', 'ignoreblanklines', default=False)
coreconfigitem('diff', 'ignorewseol', default=False)
coreconfigitem('diff', 'nobinary', default=False)
coreconfigitem('diff', 'noprefix', default=False)
coreconfigitem('email', 'bcc', default=None)
coreconfigitem('email', 'cc', default=None)
coreconfigitem('email', 'charsets', default=list)
coreconfigitem('email', 'from', default=None)
coreconfigitem('email', 'method', default='smtp')
coreconfigitem('email', 'reply-to', default=None)
coreconfigitem('email', 'to', default=None)
coreconfigitem('experimental', 'archivemetatemplate', default=dynamicdefault)
coreconfigitem('experimental', 'bundle-phases', default=False)
coreconfigitem('experimental', 'bundle2-advertise', default=True)
coreconfigitem('experimental', 'bundle2-output-capture', default=False)
coreconfigitem('experimental', 'bundle2.pushback', default=False)
coreconfigitem('experimental', 'bundle2.stream', default=False)
coreconfigitem('experimental', 'bundle2lazylocking', default=False)
coreconfigitem('experimental', 'bundlecomplevel', default=None)
coreconfigitem('experimental', 'bundlecomplevel.bzip2', default=None)
coreconfigitem('experimental', 'bundlecomplevel.gzip', default=None)
coreconfigitem('experimental', 'bundlecomplevel.none', default=None)
coreconfigitem('experimental', 'bundlecomplevel.zstd', default=None)
coreconfigitem('experimental', 'changegroup3', default=False)
coreconfigitem('experimental', 'clientcompressionengines', default=list)
coreconfigitem('experimental', 'copytrace', default='on')
coreconfigitem('experimental', 'copytrace.movecandidateslimit', default=100)
coreconfigitem('experimental', 'copytrace.sourcecommitlimit', default=100)
coreconfigitem('experimental', 'crecordtest', default=None)
coreconfigitem('experimental', 'directaccess', default=False)
coreconfigitem('experimental', 'directaccess.revnums', default=False)
coreconfigitem('experimental', 'editortmpinhg', default=False)
coreconfigitem('experimental', 'evolution', default=list)
coreconfigitem('experimental', 'evolution.allowdivergence', default=False,
               alias=[('experimental', 'allowdivergence')])
coreconfigitem('experimental', 'evolution.allowunstable', default=None)
coreconfigitem('experimental', 'evolution.createmarkers', default=None)
coreconfigitem('experimental', 'evolution.effect-flags', default=True,
               alias=[('experimental', 'effect-flags')])
coreconfigitem('experimental', 'evolution.exchange', default=None)
coreconfigitem('experimental', 'evolution.bundle-obsmarker', default=False)
coreconfigitem('experimental', 'evolution.report-instabilities', default=True)
coreconfigitem('experimental', 'evolution.track-operation', default=True)
coreconfigitem('experimental', 'worddiff', default=False)
coreconfigitem('experimental', 'maxdeltachainspan', default=-1)
coreconfigitem('experimental', 'mergetempdirprefix', default=None)
coreconfigitem('experimental', 'mmapindexthreshold', default=None)
coreconfigitem('experimental', 'nonnormalparanoidcheck', default=False)
coreconfigitem('experimental', 'exportableenviron', default=list)
coreconfigitem('experimental', 'extendedheader.index', default=None)
coreconfigitem('experimental', 'extendedheader.similarity', default=False)
coreconfigitem('experimental', 'format.compression', default='zlib')
coreconfigitem('experimental', 'graphshorten', default=False)
coreconfigitem('experimental', 'graphstyle.parent', default=dynamicdefault)
coreconfigitem('experimental', 'graphstyle.missing', default=dynamicdefault)
coreconfigitem('experimental', 'graphstyle.grandparent',
               default=dynamicdefault)
coreconfigitem('experimental', 'hook-track-tags', default=False)
coreconfigitem('experimental', 'httppeer.advertise-v2', default=False)
coreconfigitem('experimental', 'httppostargs', default=False)
coreconfigitem('experimental', 'mergedriver', default=None)
coreconfigitem('experimental', 'obsmarkers-exchange-debug', default=False)
coreconfigitem('experimental', 'remotenames', default=False)
coreconfigitem('experimental', 'revlogv2', default=None)
coreconfigitem('experimental', 'single-head-per-branch', default=False)
coreconfigitem('experimental', 'sshserver.support-v2', default=False)
coreconfigitem('experimental', 'spacemovesdown', default=False)
coreconfigitem('experimental', 'sparse-read', default=False)
coreconfigitem('experimental', 'sparse-read.density-threshold', default=0.25)
coreconfigitem('experimental', 'sparse-read.min-gap-size', default='256K')
coreconfigitem('experimental', 'treemanifest', default=False)
coreconfigitem('experimental', 'update.atomic-file', default=False)
coreconfigitem('experimental', 'sshpeer.advertise-v2', default=False)
coreconfigitem('experimental', 'web.apiserver', default=False)
coreconfigitem('experimental', 'web.api.http-v2', default=False)
coreconfigitem('experimental', 'web.api.debugreflect', default=False)
coreconfigitem('experimental', 'xdiff', default=False)
coreconfigitem('extensions', '.*', default=None, generic=True)
coreconfigitem('extdata', '.*', default=None, generic=True)
coreconfigitem('format', 'aggressivemergedeltas', default=False)
coreconfigitem('format', 'chunkcachesize', default=None)
coreconfigitem('format', 'dotencode', default=True)
coreconfigitem('format', 'generaldelta', default=False)
coreconfigitem('format', 'manifestcachesize', default=None)
coreconfigitem('format', 'maxchainlen', default=None)
coreconfigitem('format', 'obsstore-version', default=None)
coreconfigitem('format', 'usefncache', default=True)
642 coreconfigitem('format', 'usegeneraldelta',
642 coreconfigitem('format', 'usegeneraldelta',
643 default=True,
643 default=True,
644 )
644 )
645 coreconfigitem('format', 'usestore',
645 coreconfigitem('format', 'usestore',
646 default=True,
646 default=True,
647 )
647 )
648 coreconfigitem('fsmonitor', 'warn_when_unused',
648 coreconfigitem('fsmonitor', 'warn_when_unused',
649 default=True,
649 default=True,
650 )
650 )
651 coreconfigitem('fsmonitor', 'warn_update_file_count',
651 coreconfigitem('fsmonitor', 'warn_update_file_count',
652 default=50000,
652 default=50000,
653 )
653 )
654 coreconfigitem('hooks', '.*',
654 coreconfigitem('hooks', '.*',
655 default=dynamicdefault,
655 default=dynamicdefault,
656 generic=True,
656 generic=True,
657 )
657 )
658 coreconfigitem('hgweb-paths', '.*',
658 coreconfigitem('hgweb-paths', '.*',
659 default=list,
659 default=list,
660 generic=True,
660 generic=True,
661 )
661 )
662 coreconfigitem('hostfingerprints', '.*',
662 coreconfigitem('hostfingerprints', '.*',
663 default=list,
663 default=list,
664 generic=True,
664 generic=True,
665 )
665 )
666 coreconfigitem('hostsecurity', 'ciphers',
666 coreconfigitem('hostsecurity', 'ciphers',
667 default=None,
667 default=None,
668 )
668 )
669 coreconfigitem('hostsecurity', 'disabletls10warning',
669 coreconfigitem('hostsecurity', 'disabletls10warning',
670 default=False,
670 default=False,
671 )
671 )
672 coreconfigitem('hostsecurity', 'minimumprotocol',
672 coreconfigitem('hostsecurity', 'minimumprotocol',
673 default=dynamicdefault,
673 default=dynamicdefault,
674 )
674 )
675 coreconfigitem('hostsecurity', '.*:minimumprotocol$',
675 coreconfigitem('hostsecurity', '.*:minimumprotocol$',
676 default=dynamicdefault,
676 default=dynamicdefault,
677 generic=True,
677 generic=True,
678 )
678 )
679 coreconfigitem('hostsecurity', '.*:ciphers$',
679 coreconfigitem('hostsecurity', '.*:ciphers$',
680 default=dynamicdefault,
680 default=dynamicdefault,
681 generic=True,
681 generic=True,
682 )
682 )
683 coreconfigitem('hostsecurity', '.*:fingerprints$',
683 coreconfigitem('hostsecurity', '.*:fingerprints$',
684 default=list,
684 default=list,
685 generic=True,
685 generic=True,
686 )
686 )
687 coreconfigitem('hostsecurity', '.*:verifycertsfile$',
687 coreconfigitem('hostsecurity', '.*:verifycertsfile$',
688 default=None,
688 default=None,
689 generic=True,
689 generic=True,
690 )
690 )
691
691
692 coreconfigitem('http_proxy', 'always',
692 coreconfigitem('http_proxy', 'always',
693 default=False,
693 default=False,
694 )
694 )
695 coreconfigitem('http_proxy', 'host',
695 coreconfigitem('http_proxy', 'host',
696 default=None,
696 default=None,
697 )
697 )
698 coreconfigitem('http_proxy', 'no',
698 coreconfigitem('http_proxy', 'no',
699 default=list,
699 default=list,
700 )
700 )
701 coreconfigitem('http_proxy', 'passwd',
701 coreconfigitem('http_proxy', 'passwd',
702 default=None,
702 default=None,
703 )
703 )
704 coreconfigitem('http_proxy', 'user',
704 coreconfigitem('http_proxy', 'user',
705 default=None,
705 default=None,
706 )
706 )
707 coreconfigitem('logtoprocess', 'commandexception',
707 coreconfigitem('logtoprocess', 'commandexception',
708 default=None,
708 default=None,
709 )
709 )
710 coreconfigitem('logtoprocess', 'commandfinish',
710 coreconfigitem('logtoprocess', 'commandfinish',
711 default=None,
711 default=None,
712 )
712 )
713 coreconfigitem('logtoprocess', 'command',
713 coreconfigitem('logtoprocess', 'command',
714 default=None,
714 default=None,
715 )
715 )
716 coreconfigitem('logtoprocess', 'develwarn',
716 coreconfigitem('logtoprocess', 'develwarn',
717 default=None,
717 default=None,
718 )
718 )
719 coreconfigitem('logtoprocess', 'uiblocked',
719 coreconfigitem('logtoprocess', 'uiblocked',
720 default=None,
720 default=None,
721 )
721 )
722 coreconfigitem('merge', 'checkunknown',
722 coreconfigitem('merge', 'checkunknown',
723 default='abort',
723 default='abort',
724 )
724 )
725 coreconfigitem('merge', 'checkignored',
725 coreconfigitem('merge', 'checkignored',
726 default='abort',
726 default='abort',
727 )
727 )
728 coreconfigitem('experimental', 'merge.checkpathconflicts',
728 coreconfigitem('experimental', 'merge.checkpathconflicts',
729 default=False,
729 default=False,
730 )
730 )
731 coreconfigitem('merge', 'followcopies',
731 coreconfigitem('merge', 'followcopies',
732 default=True,
732 default=True,
733 )
733 )
734 coreconfigitem('merge', 'on-failure',
734 coreconfigitem('merge', 'on-failure',
735 default='continue',
735 default='continue',
736 )
736 )
737 coreconfigitem('merge', 'preferancestor',
737 coreconfigitem('merge', 'preferancestor',
738 default=lambda: ['*'],
738 default=lambda: ['*'],
739 )
739 )
740 coreconfigitem('merge-tools', '.*',
740 coreconfigitem('merge-tools', '.*',
741 default=None,
741 default=None,
742 generic=True,
742 generic=True,
743 )
743 )
744 coreconfigitem('merge-tools', br'.*\.args$',
744 coreconfigitem('merge-tools', br'.*\.args$',
745 default="$local $base $other",
745 default="$local $base $other",
746 generic=True,
746 generic=True,
747 priority=-1,
747 priority=-1,
748 )
748 )
749 coreconfigitem('merge-tools', br'.*\.binary$',
749 coreconfigitem('merge-tools', br'.*\.binary$',
750 default=False,
750 default=False,
751 generic=True,
751 generic=True,
752 priority=-1,
752 priority=-1,
753 )
753 )
754 coreconfigitem('merge-tools', br'.*\.check$',
754 coreconfigitem('merge-tools', br'.*\.check$',
755 default=list,
755 default=list,
756 generic=True,
756 generic=True,
757 priority=-1,
757 priority=-1,
758 )
758 )
759 coreconfigitem('merge-tools', br'.*\.checkchanged$',
759 coreconfigitem('merge-tools', br'.*\.checkchanged$',
760 default=False,
760 default=False,
761 generic=True,
761 generic=True,
762 priority=-1,
762 priority=-1,
763 )
763 )
764 coreconfigitem('merge-tools', br'.*\.executable$',
764 coreconfigitem('merge-tools', br'.*\.executable$',
765 default=dynamicdefault,
765 default=dynamicdefault,
766 generic=True,
766 generic=True,
767 priority=-1,
767 priority=-1,
768 )
768 )
769 coreconfigitem('merge-tools', br'.*\.fixeol$',
769 coreconfigitem('merge-tools', br'.*\.fixeol$',
770 default=False,
770 default=False,
771 generic=True,
771 generic=True,
772 priority=-1,
772 priority=-1,
773 )
773 )
774 coreconfigitem('merge-tools', br'.*\.gui$',
774 coreconfigitem('merge-tools', br'.*\.gui$',
775 default=False,
775 default=False,
776 generic=True,
776 generic=True,
777 priority=-1,
777 priority=-1,
778 )
778 )
779 coreconfigitem('merge-tools', br'.*\.mergemarkers$',
779 coreconfigitem('merge-tools', br'.*\.mergemarkers$',
780 default='basic',
780 default='basic',
781 generic=True,
781 generic=True,
782 priority=-1,
782 priority=-1,
783 )
783 )
784 coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
784 coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
785 default=dynamicdefault, # take from ui.mergemarkertemplate
785 default=dynamicdefault, # take from ui.mergemarkertemplate
786 generic=True,
786 generic=True,
787 priority=-1,
787 priority=-1,
788 )
788 )
789 coreconfigitem('merge-tools', br'.*\.priority$',
789 coreconfigitem('merge-tools', br'.*\.priority$',
790 default=0,
790 default=0,
791 generic=True,
791 generic=True,
792 priority=-1,
792 priority=-1,
793 )
793 )
794 coreconfigitem('merge-tools', br'.*\.premerge$',
794 coreconfigitem('merge-tools', br'.*\.premerge$',
795 default=dynamicdefault,
795 default=dynamicdefault,
796 generic=True,
796 generic=True,
797 priority=-1,
797 priority=-1,
798 )
798 )
799 coreconfigitem('merge-tools', br'.*\.symlink$',
799 coreconfigitem('merge-tools', br'.*\.symlink$',
800 default=False,
800 default=False,
801 generic=True,
801 generic=True,
802 priority=-1,
802 priority=-1,
803 )
803 )
804 coreconfigitem('pager', 'attend-.*',
804 coreconfigitem('pager', 'attend-.*',
805 default=dynamicdefault,
805 default=dynamicdefault,
806 generic=True,
806 generic=True,
807 )
807 )
808 coreconfigitem('pager', 'ignore',
808 coreconfigitem('pager', 'ignore',
809 default=list,
809 default=list,
810 )
810 )
811 coreconfigitem('pager', 'pager',
811 coreconfigitem('pager', 'pager',
812 default=dynamicdefault,
812 default=dynamicdefault,
813 )
813 )
814 coreconfigitem('patch', 'eol',
814 coreconfigitem('patch', 'eol',
815 default='strict',
815 default='strict',
816 )
816 )
817 coreconfigitem('patch', 'fuzz',
817 coreconfigitem('patch', 'fuzz',
818 default=2,
818 default=2,
819 )
819 )
820 coreconfigitem('paths', 'default',
820 coreconfigitem('paths', 'default',
821 default=None,
821 default=None,
822 )
822 )
823 coreconfigitem('paths', 'default-push',
823 coreconfigitem('paths', 'default-push',
824 default=None,
824 default=None,
825 )
825 )
826 coreconfigitem('paths', '.*',
826 coreconfigitem('paths', '.*',
827 default=None,
827 default=None,
828 generic=True,
828 generic=True,
829 )
829 )
830 coreconfigitem('phases', 'checksubrepos',
830 coreconfigitem('phases', 'checksubrepos',
831 default='follow',
831 default='follow',
832 )
832 )
833 coreconfigitem('phases', 'new-commit',
833 coreconfigitem('phases', 'new-commit',
834 default='draft',
834 default='draft',
835 )
835 )
836 coreconfigitem('phases', 'publish',
836 coreconfigitem('phases', 'publish',
837 default=True,
837 default=True,
838 )
838 )
839 coreconfigitem('profiling', 'enabled',
839 coreconfigitem('profiling', 'enabled',
840 default=False,
840 default=False,
841 )
841 )
842 coreconfigitem('profiling', 'format',
842 coreconfigitem('profiling', 'format',
843 default='text',
843 default='text',
844 )
844 )
845 coreconfigitem('profiling', 'freq',
845 coreconfigitem('profiling', 'freq',
846 default=1000,
846 default=1000,
847 )
847 )
848 coreconfigitem('profiling', 'limit',
848 coreconfigitem('profiling', 'limit',
849 default=30,
849 default=30,
850 )
850 )
851 coreconfigitem('profiling', 'nested',
851 coreconfigitem('profiling', 'nested',
852 default=0,
852 default=0,
853 )
853 )
854 coreconfigitem('profiling', 'output',
854 coreconfigitem('profiling', 'output',
855 default=None,
855 default=None,
856 )
856 )
857 coreconfigitem('profiling', 'showmax',
857 coreconfigitem('profiling', 'showmax',
858 default=0.999,
858 default=0.999,
859 )
859 )
860 coreconfigitem('profiling', 'showmin',
860 coreconfigitem('profiling', 'showmin',
861 default=dynamicdefault,
861 default=dynamicdefault,
862 )
862 )
863 coreconfigitem('profiling', 'sort',
863 coreconfigitem('profiling', 'sort',
864 default='inlinetime',
864 default='inlinetime',
865 )
865 )
866 coreconfigitem('profiling', 'statformat',
866 coreconfigitem('profiling', 'statformat',
867 default='hotpath',
867 default='hotpath',
868 )
868 )
869 coreconfigitem('profiling', 'type',
869 coreconfigitem('profiling', 'type',
870 default='stat',
870 default='stat',
871 )
871 )
872 coreconfigitem('progress', 'assume-tty',
872 coreconfigitem('progress', 'assume-tty',
873 default=False,
873 default=False,
874 )
874 )
875 coreconfigitem('progress', 'changedelay',
875 coreconfigitem('progress', 'changedelay',
876 default=1,
876 default=1,
877 )
877 )
878 coreconfigitem('progress', 'clear-complete',
878 coreconfigitem('progress', 'clear-complete',
879 default=True,
879 default=True,
880 )
880 )
881 coreconfigitem('progress', 'debug',
881 coreconfigitem('progress', 'debug',
882 default=False,
882 default=False,
883 )
883 )
884 coreconfigitem('progress', 'delay',
884 coreconfigitem('progress', 'delay',
885 default=3,
885 default=3,
886 )
886 )
887 coreconfigitem('progress', 'disable',
887 coreconfigitem('progress', 'disable',
888 default=False,
888 default=False,
889 )
889 )
890 coreconfigitem('progress', 'estimateinterval',
890 coreconfigitem('progress', 'estimateinterval',
891 default=60.0,
891 default=60.0,
892 )
892 )
893 coreconfigitem('progress', 'format',
893 coreconfigitem('progress', 'format',
894 default=lambda: ['topic', 'bar', 'number', 'estimate'],
894 default=lambda: ['topic', 'bar', 'number', 'estimate'],
895 )
895 )
896 coreconfigitem('progress', 'refresh',
896 coreconfigitem('progress', 'refresh',
897 default=0.1,
897 default=0.1,
898 )
898 )
899 coreconfigitem('progress', 'width',
899 coreconfigitem('progress', 'width',
900 default=dynamicdefault,
900 default=dynamicdefault,
901 )
901 )
902 coreconfigitem('push', 'pushvars.server',
902 coreconfigitem('push', 'pushvars.server',
903 default=False,
903 default=False,
904 )
904 )
905 coreconfigitem('server', 'bookmarks-pushkey-compat',
905 coreconfigitem('server', 'bookmarks-pushkey-compat',
906 default=True,
906 default=True,
907 )
907 )
908 coreconfigitem('server', 'bundle1',
908 coreconfigitem('server', 'bundle1',
909 default=True,
909 default=True,
910 )
910 )
911 coreconfigitem('server', 'bundle1gd',
911 coreconfigitem('server', 'bundle1gd',
912 default=None,
912 default=None,
913 )
913 )
914 coreconfigitem('server', 'bundle1.pull',
914 coreconfigitem('server', 'bundle1.pull',
915 default=None,
915 default=None,
916 )
916 )
917 coreconfigitem('server', 'bundle1gd.pull',
917 coreconfigitem('server', 'bundle1gd.pull',
918 default=None,
918 default=None,
919 )
919 )
920 coreconfigitem('server', 'bundle1.push',
920 coreconfigitem('server', 'bundle1.push',
921 default=None,
921 default=None,
922 )
922 )
923 coreconfigitem('server', 'bundle1gd.push',
923 coreconfigitem('server', 'bundle1gd.push',
924 default=None,
924 default=None,
925 )
925 )
926 coreconfigitem('server', 'compressionengines',
926 coreconfigitem('server', 'compressionengines',
927 default=list,
927 default=list,
928 )
928 )
929 coreconfigitem('server', 'concurrent-push-mode',
929 coreconfigitem('server', 'concurrent-push-mode',
930 default='strict',
930 default='strict',
931 )
931 )
932 coreconfigitem('server', 'disablefullbundle',
932 coreconfigitem('server', 'disablefullbundle',
933 default=False,
933 default=False,
934 )
934 )
935 coreconfigitem('server', 'streamunbundle',
935 coreconfigitem('server', 'streamunbundle',
936 default=False,
936 default=False,
937 )
937 )
938 coreconfigitem('server', 'pullbundle',
938 coreconfigitem('server', 'pullbundle',
939 default=False,
939 default=False,
940 )
940 )
941 coreconfigitem('server', 'maxhttpheaderlen',
941 coreconfigitem('server', 'maxhttpheaderlen',
942 default=1024,
942 default=1024,
943 )
943 )
944 coreconfigitem('server', 'preferuncompressed',
944 coreconfigitem('server', 'preferuncompressed',
945 default=False,
945 default=False,
946 )
946 )
947 coreconfigitem('server', 'uncompressed',
947 coreconfigitem('server', 'uncompressed',
948 default=True,
948 default=True,
949 )
949 )
950 coreconfigitem('server', 'uncompressedallowsecret',
950 coreconfigitem('server', 'uncompressedallowsecret',
951 default=False,
951 default=False,
952 )
952 )
953 coreconfigitem('server', 'validate',
953 coreconfigitem('server', 'validate',
954 default=False,
954 default=False,
955 )
955 )
956 coreconfigitem('server', 'zliblevel',
956 coreconfigitem('server', 'zliblevel',
957 default=-1,
957 default=-1,
958 )
958 )
959 coreconfigitem('server', 'zstdlevel',
959 coreconfigitem('server', 'zstdlevel',
960 default=3,
960 default=3,
961 )
961 )
962 coreconfigitem('share', 'pool',
962 coreconfigitem('share', 'pool',
963 default=None,
963 default=None,
964 )
964 )
965 coreconfigitem('share', 'poolnaming',
965 coreconfigitem('share', 'poolnaming',
966 default='identity',
966 default='identity',
967 )
967 )
968 coreconfigitem('smtp', 'host',
968 coreconfigitem('smtp', 'host',
969 default=None,
969 default=None,
970 )
970 )
971 coreconfigitem('smtp', 'local_hostname',
971 coreconfigitem('smtp', 'local_hostname',
972 default=None,
972 default=None,
973 )
973 )
974 coreconfigitem('smtp', 'password',
974 coreconfigitem('smtp', 'password',
975 default=None,
975 default=None,
976 )
976 )
977 coreconfigitem('smtp', 'port',
977 coreconfigitem('smtp', 'port',
978 default=dynamicdefault,
978 default=dynamicdefault,
979 )
979 )
980 coreconfigitem('smtp', 'tls',
980 coreconfigitem('smtp', 'tls',
981 default='none',
981 default='none',
982 )
982 )
983 coreconfigitem('smtp', 'username',
983 coreconfigitem('smtp', 'username',
984 default=None,
984 default=None,
985 )
985 )
986 coreconfigitem('sparse', 'missingwarning',
986 coreconfigitem('sparse', 'missingwarning',
987 default=True,
987 default=True,
988 )
988 )
989 coreconfigitem('subrepos', 'allowed',
989 coreconfigitem('subrepos', 'allowed',
990 default=dynamicdefault, # to make backporting simpler
990 default=dynamicdefault, # to make backporting simpler
991 )
991 )
992 coreconfigitem('subrepos', 'hg:allowed',
992 coreconfigitem('subrepos', 'hg:allowed',
993 default=dynamicdefault,
993 default=dynamicdefault,
994 )
994 )
995 coreconfigitem('subrepos', 'git:allowed',
995 coreconfigitem('subrepos', 'git:allowed',
996 default=dynamicdefault,
996 default=dynamicdefault,
997 )
997 )
998 coreconfigitem('subrepos', 'svn:allowed',
998 coreconfigitem('subrepos', 'svn:allowed',
999 default=dynamicdefault,
999 default=dynamicdefault,
1000 )
1000 )
1001 coreconfigitem('templates', '.*',
1001 coreconfigitem('templates', '.*',
1002 default=None,
1002 default=None,
1003 generic=True,
1003 generic=True,
1004 )
1004 )
1005 coreconfigitem('trusted', 'groups',
1005 coreconfigitem('trusted', 'groups',
1006 default=list,
1006 default=list,
1007 )
1007 )
1008 coreconfigitem('trusted', 'users',
1008 coreconfigitem('trusted', 'users',
1009 default=list,
1009 default=list,
1010 )
1010 )
1011 coreconfigitem('ui', '_usedassubrepo',
1011 coreconfigitem('ui', '_usedassubrepo',
1012 default=False,
1012 default=False,
1013 )
1013 )
1014 coreconfigitem('ui', 'allowemptycommit',
1014 coreconfigitem('ui', 'allowemptycommit',
1015 default=False,
1015 default=False,
1016 )
1016 )
1017 coreconfigitem('ui', 'archivemeta',
1017 coreconfigitem('ui', 'archivemeta',
1018 default=True,
1018 default=True,
1019 )
1019 )
1020 coreconfigitem('ui', 'askusername',
1020 coreconfigitem('ui', 'askusername',
1021 default=False,
1021 default=False,
1022 )
1022 )
1023 coreconfigitem('ui', 'clonebundlefallback',
1023 coreconfigitem('ui', 'clonebundlefallback',
1024 default=False,
1024 default=False,
1025 )
1025 )
1026 coreconfigitem('ui', 'clonebundleprefers',
1026 coreconfigitem('ui', 'clonebundleprefers',
1027 default=list,
1027 default=list,
1028 )
1028 )
1029 coreconfigitem('ui', 'clonebundles',
1029 coreconfigitem('ui', 'clonebundles',
1030 default=True,
1030 default=True,
1031 )
1031 )
1032 coreconfigitem('ui', 'color',
1032 coreconfigitem('ui', 'color',
1033 default='auto',
1033 default='auto',
1034 )
1034 )
1035 coreconfigitem('ui', 'commitsubrepos',
1035 coreconfigitem('ui', 'commitsubrepos',
1036 default=False,
1036 default=False,
1037 )
1037 )
1038 coreconfigitem('ui', 'debug',
1038 coreconfigitem('ui', 'debug',
1039 default=False,
1039 default=False,
1040 )
1040 )
1041 coreconfigitem('ui', 'debugger',
1041 coreconfigitem('ui', 'debugger',
1042 default=None,
1042 default=None,
1043 )
1043 )
1044 coreconfigitem('ui', 'editor',
1044 coreconfigitem('ui', 'editor',
1045 default=dynamicdefault,
1045 default=dynamicdefault,
1046 )
1046 )
1047 coreconfigitem('ui', 'fallbackencoding',
1047 coreconfigitem('ui', 'fallbackencoding',
1048 default=None,
1048 default=None,
1049 )
1049 )
1050 coreconfigitem('ui', 'forcecwd',
1050 coreconfigitem('ui', 'forcecwd',
1051 default=None,
1051 default=None,
1052 )
1052 )
1053 coreconfigitem('ui', 'forcemerge',
1053 coreconfigitem('ui', 'forcemerge',
1054 default=None,
1054 default=None,
1055 )
1055 )
1056 coreconfigitem('ui', 'formatdebug',
1056 coreconfigitem('ui', 'formatdebug',
1057 default=False,
1057 default=False,
1058 )
1058 )
1059 coreconfigitem('ui', 'formatjson',
1059 coreconfigitem('ui', 'formatjson',
1060 default=False,
1060 default=False,
1061 )
1061 )
1062 coreconfigitem('ui', 'formatted',
1062 coreconfigitem('ui', 'formatted',
1063 default=None,
1063 default=None,
1064 )
1064 )
1065 coreconfigitem('ui', 'graphnodetemplate',
1065 coreconfigitem('ui', 'graphnodetemplate',
1066 default=None,
1066 default=None,
1067 )
1067 )
1068 coreconfigitem('ui', 'interactive',
1068 coreconfigitem('ui', 'interactive',
1069 default=None,
1069 default=None,
1070 )
1070 )
1071 coreconfigitem('ui', 'interface',
1071 coreconfigitem('ui', 'interface',
1072 default=None,
1072 default=None,
1073 )
1073 )
1074 coreconfigitem('ui', 'interface.chunkselector',
1074 coreconfigitem('ui', 'interface.chunkselector',
1075 default=None,
1075 default=None,
1076 )
1076 )
1077 coreconfigitem('ui', 'logblockedtimes',
1077 coreconfigitem('ui', 'logblockedtimes',
1078 default=False,
1078 default=False,
1079 )
1079 )
1080 coreconfigitem('ui', 'logtemplate',
1080 coreconfigitem('ui', 'logtemplate',
1081 default=None,
1081 default=None,
1082 )
1082 )
1083 coreconfigitem('ui', 'merge',
1083 coreconfigitem('ui', 'merge',
1084 default=None,
1084 default=None,
1085 )
1085 )
1086 coreconfigitem('ui', 'mergemarkers',
1086 coreconfigitem('ui', 'mergemarkers',
1087 default='basic',
1087 default='basic',
1088 )
1088 )
1089 coreconfigitem('ui', 'mergemarkertemplate',
1089 coreconfigitem('ui', 'mergemarkertemplate',
1090 default=('{node|short} '
1090 default=('{node|short} '
1091 '{ifeq(tags, "tip", "", '
1091 '{ifeq(tags, "tip", "", '
1092 'ifeq(tags, "", "", "{tags} "))}'
1092 'ifeq(tags, "", "", "{tags} "))}'
1093 '{if(bookmarks, "{bookmarks} ")}'
1093 '{if(bookmarks, "{bookmarks} ")}'
1094 '{ifeq(branch, "default", "", "{branch} ")}'
1094 '{ifeq(branch, "default", "", "{branch} ")}'
1095 '- {author|user}: {desc|firstline}')
1095 '- {author|user}: {desc|firstline}')
1096 )
1096 )
1097 coreconfigitem('ui', 'nontty',
1097 coreconfigitem('ui', 'nontty',
1098 default=False,
1098 default=False,
1099 )
1099 )
1100 coreconfigitem('ui', 'origbackuppath',
1100 coreconfigitem('ui', 'origbackuppath',
1101 default=None,
1101 default=None,
1102 )
1102 )
1103 coreconfigitem('ui', 'paginate',
1103 coreconfigitem('ui', 'paginate',
1104 default=True,
1104 default=True,
1105 )
1105 )
1106 coreconfigitem('ui', 'patch',
1106 coreconfigitem('ui', 'patch',
1107 default=None,
1107 default=None,
1108 )
1108 )
1109 coreconfigitem('ui', 'portablefilenames',
1109 coreconfigitem('ui', 'portablefilenames',
1110 default='warn',
1110 default='warn',
1111 )
1111 )
1112 coreconfigitem('ui', 'promptecho',
1112 coreconfigitem('ui', 'promptecho',
1113 default=False,
1113 default=False,
1114 )
1114 )
1115 coreconfigitem('ui', 'quiet',
1115 coreconfigitem('ui', 'quiet',
1116 default=False,
1116 default=False,
1117 )
1117 )
1118 coreconfigitem('ui', 'quietbookmarkmove',
1118 coreconfigitem('ui', 'quietbookmarkmove',
1119 default=False,
1119 default=False,
1120 )
1120 )
1121 coreconfigitem('ui', 'remotecmd',
1121 coreconfigitem('ui', 'remotecmd',
1122 default='hg',
1122 default='hg',
1123 )
1123 )
1124 coreconfigitem('ui', 'report_untrusted',
1124 coreconfigitem('ui', 'report_untrusted',
1125 default=True,
1125 default=True,
1126 )
1126 )
1127 coreconfigitem('ui', 'rollback',
1127 coreconfigitem('ui', 'rollback',
1128 default=True,
1128 default=True,
1129 )
1129 )
1130 coreconfigitem('ui', 'signal-safe-lock',
1131 default=True,
1132 )
1130 coreconfigitem('ui', 'slash',
1133 coreconfigitem('ui', 'slash',
1131 default=False,
1134 default=False,
1132 )
1135 )
1133 coreconfigitem('ui', 'ssh',
1136 coreconfigitem('ui', 'ssh',
1134 default='ssh',
1137 default='ssh',
1135 )
1138 )
1136 coreconfigitem('ui', 'ssherrorhint',
1139 coreconfigitem('ui', 'ssherrorhint',
1137 default=None,
1140 default=None,
1138 )
1141 )
1139 coreconfigitem('ui', 'statuscopies',
1142 coreconfigitem('ui', 'statuscopies',
1140 default=False,
1143 default=False,
1141 )
1144 )
1142 coreconfigitem('ui', 'strict',
1145 coreconfigitem('ui', 'strict',
1143 default=False,
1146 default=False,
1144 )
1147 )
1145 coreconfigitem('ui', 'style',
1148 coreconfigitem('ui', 'style',
1146 default='',
1149 default='',
1147 )
1150 )
1148 coreconfigitem('ui', 'supportcontact',
1151 coreconfigitem('ui', 'supportcontact',
1149 default=None,
1152 default=None,
1150 )
1153 )
1151 coreconfigitem('ui', 'textwidth',
1154 coreconfigitem('ui', 'textwidth',
1152 default=78,
1155 default=78,
1153 )
1156 )
1154 coreconfigitem('ui', 'timeout',
1157 coreconfigitem('ui', 'timeout',
1155 default='600',
1158 default='600',
1156 )
1159 )
1157 coreconfigitem('ui', 'timeout.warn',
1160 coreconfigitem('ui', 'timeout.warn',
1158 default=0,
1161 default=0,
1159 )
1162 )
1160 coreconfigitem('ui', 'traceback',
1163 coreconfigitem('ui', 'traceback',
1161 default=False,
1164 default=False,
1162 )
1165 )
1163 coreconfigitem('ui', 'tweakdefaults',
1166 coreconfigitem('ui', 'tweakdefaults',
1164 default=False,
1167 default=False,
1165 )
1168 )
1166 coreconfigitem('ui', 'username',
1169 coreconfigitem('ui', 'username',
1167 alias=[('ui', 'user')]
1170 alias=[('ui', 'user')]
1168 )
1171 )
1169 coreconfigitem('ui', 'verbose',
1172 coreconfigitem('ui', 'verbose',
1170 default=False,
1173 default=False,
1171 )
1174 )
1172 coreconfigitem('verify', 'skipflags',
1175 coreconfigitem('verify', 'skipflags',
1173 default=None,
1176 default=None,
1174 )
1177 )
1175 coreconfigitem('web', 'allowbz2',
1178 coreconfigitem('web', 'allowbz2',
1176 default=False,
1179 default=False,
1177 )
1180 )
1178 coreconfigitem('web', 'allowgz',
1181 coreconfigitem('web', 'allowgz',
1179 default=False,
1182 default=False,
1180 )
1183 )
1181 coreconfigitem('web', 'allow-pull',
1184 coreconfigitem('web', 'allow-pull',
1182 alias=[('web', 'allowpull')],
1185 alias=[('web', 'allowpull')],
1183 default=True,
1186 default=True,
1184 )
1187 )
1185 coreconfigitem('web', 'allow-push',
1188 coreconfigitem('web', 'allow-push',
1186 alias=[('web', 'allow_push')],
1189 alias=[('web', 'allow_push')],
1187 default=list,
1190 default=list,
1188 )
1191 )
1189 coreconfigitem('web', 'allowzip',
1192 coreconfigitem('web', 'allowzip',
1190 default=False,
1193 default=False,
1191 )
1194 )
1192 coreconfigitem('web', 'archivesubrepos',
1195 coreconfigitem('web', 'archivesubrepos',
1193 default=False,
1196 default=False,
1194 )
1197 )
1195 coreconfigitem('web', 'cache',
1198 coreconfigitem('web', 'cache',
1196 default=True,
1199 default=True,
1197 )
1200 )
1198 coreconfigitem('web', 'contact',
1201 coreconfigitem('web', 'contact',
1199 default=None,
1202 default=None,
1200 )
1203 )
1201 coreconfigitem('web', 'deny_push',
1204 coreconfigitem('web', 'deny_push',
1202 default=list,
1205 default=list,
1203 )
1206 )
1204 coreconfigitem('web', 'guessmime',
1207 coreconfigitem('web', 'guessmime',
1205 default=False,
1208 default=False,
1206 )
1209 )
1207 coreconfigitem('web', 'hidden',
1210 coreconfigitem('web', 'hidden',
1208 default=False,
1211 default=False,
1209 )
1212 )
1210 coreconfigitem('web', 'labels',
1213 coreconfigitem('web', 'labels',
1211 default=list,
1214 default=list,
1212 )
1215 )
1213 coreconfigitem('web', 'logoimg',
1216 coreconfigitem('web', 'logoimg',
1214 default='hglogo.png',
1217 default='hglogo.png',
1215 )
1218 )
1216 coreconfigitem('web', 'logourl',
1219 coreconfigitem('web', 'logourl',
1217 default='https://mercurial-scm.org/',
1220 default='https://mercurial-scm.org/',
1218 )
1221 )
1219 coreconfigitem('web', 'accesslog',
1222 coreconfigitem('web', 'accesslog',
1220 default='-',
1223 default='-',
1221 )
1224 )
1222 coreconfigitem('web', 'address',
1225 coreconfigitem('web', 'address',
1223 default='',
1226 default='',
1224 )
1227 )
1225 coreconfigitem('web', 'allow_archive',
1228 coreconfigitem('web', 'allow_archive',
1226 default=list,
1229 default=list,
1227 )
1230 )
1228 coreconfigitem('web', 'allow_read',
1231 coreconfigitem('web', 'allow_read',
1229 default=list,
1232 default=list,
1230 )
1233 )
1231 coreconfigitem('web', 'baseurl',
1234 coreconfigitem('web', 'baseurl',
1232 default=None,
1235 default=None,
1233 )
1236 )
1234 coreconfigitem('web', 'cacerts',
1237 coreconfigitem('web', 'cacerts',
1235 default=None,
1238 default=None,
1236 )
1239 )
1237 coreconfigitem('web', 'certificate',
1240 coreconfigitem('web', 'certificate',
1238 default=None,
1241 default=None,
1239 )
1242 )
1240 coreconfigitem('web', 'collapse',
1243 coreconfigitem('web', 'collapse',
1241 default=False,
1244 default=False,
1242 )
1245 )
1243 coreconfigitem('web', 'csp',
1246 coreconfigitem('web', 'csp',
1244 default=None,
1247 default=None,
1245 )
1248 )
1246 coreconfigitem('web', 'deny_read',
1249 coreconfigitem('web', 'deny_read',
1247 default=list,
1250 default=list,
1248 )
1251 )
1249 coreconfigitem('web', 'descend',
1252 coreconfigitem('web', 'descend',
1250 default=True,
1253 default=True,
1251 )
1254 )
1252 coreconfigitem('web', 'description',
1255 coreconfigitem('web', 'description',
1253 default="",
1256 default="",
1254 )
1257 )
1255 coreconfigitem('web', 'encoding',
1258 coreconfigitem('web', 'encoding',
1256 default=lambda: encoding.encoding,
1259 default=lambda: encoding.encoding,
1257 )
1260 )
1258 coreconfigitem('web', 'errorlog',
1261 coreconfigitem('web', 'errorlog',
1259 default='-',
1262 default='-',
1260 )
1263 )
1261 coreconfigitem('web', 'ipv6',
1264 coreconfigitem('web', 'ipv6',
1262 default=False,
1265 default=False,
1263 )
1266 )
1264 coreconfigitem('web', 'maxchanges',
1267 coreconfigitem('web', 'maxchanges',
1265 default=10,
1268 default=10,
1266 )
1269 )
1267 coreconfigitem('web', 'maxfiles',
1270 coreconfigitem('web', 'maxfiles',
1268 default=10,
1271 default=10,
1269 )
1272 )
1270 coreconfigitem('web', 'maxshortchanges',
1273 coreconfigitem('web', 'maxshortchanges',
1271 default=60,
1274 default=60,
1272 )
1275 )
1273 coreconfigitem('web', 'motd',
1276 coreconfigitem('web', 'motd',
1274 default='',
1277 default='',
1275 )
1278 )
1276 coreconfigitem('web', 'name',
1279 coreconfigitem('web', 'name',
1277 default=dynamicdefault,
1280 default=dynamicdefault,
1278 )
1281 )
1279 coreconfigitem('web', 'port',
1282 coreconfigitem('web', 'port',
1280 default=8000,
1283 default=8000,
1281 )
1284 )
1282 coreconfigitem('web', 'prefix',
1285 coreconfigitem('web', 'prefix',
1283 default='',
1286 default='',
1284 )
1287 )
1285 coreconfigitem('web', 'push_ssl',
1288 coreconfigitem('web', 'push_ssl',
1286 default=True,
1289 default=True,
1287 )
1290 )
1288 coreconfigitem('web', 'refreshinterval',
1291 coreconfigitem('web', 'refreshinterval',
1289 default=20,
1292 default=20,
1290 )
1293 )
1291 coreconfigitem('web', 'server-header',
1294 coreconfigitem('web', 'server-header',
1292 default=None,
1295 default=None,
1293 )
1296 )
1294 coreconfigitem('web', 'staticurl',
1297 coreconfigitem('web', 'staticurl',
1295 default=None,
1298 default=None,
1296 )
1299 )
1297 coreconfigitem('web', 'stripes',
1300 coreconfigitem('web', 'stripes',
1298 default=1,
1301 default=1,
1299 )
1302 )
1300 coreconfigitem('web', 'style',
1303 coreconfigitem('web', 'style',
1301 default='paper',
1304 default='paper',
1302 )
1305 )
1303 coreconfigitem('web', 'templates',
1306 coreconfigitem('web', 'templates',
1304 default=None,
1307 default=None,
1305 )
1308 )
1306 coreconfigitem('web', 'view',
1309 coreconfigitem('web', 'view',
1307 default='served',
1310 default='served',
1308 )
1311 )
1309 coreconfigitem('worker', 'backgroundclose',
1312 coreconfigitem('worker', 'backgroundclose',
1310 default=dynamicdefault,
1313 default=dynamicdefault,
1311 )
1314 )
1312 # Windows defaults to a limit of 512 open files. A buffer of 128
1315 # Windows defaults to a limit of 512 open files. A buffer of 128
1313 # should give us enough headway.
1316 # should give us enough headway.
1314 coreconfigitem('worker', 'backgroundclosemaxqueue',
1317 coreconfigitem('worker', 'backgroundclosemaxqueue',
1315 default=384,
1318 default=384,
1316 )
1319 )
1317 coreconfigitem('worker', 'backgroundcloseminfilecount',
1320 coreconfigitem('worker', 'backgroundcloseminfilecount',
1318 default=2048,
1321 default=2048,
1319 )
1322 )
1320 coreconfigitem('worker', 'backgroundclosethreadcount',
1323 coreconfigitem('worker', 'backgroundclosethreadcount',
1321 default=4,
1324 default=4,
1322 )
1325 )
1323 coreconfigitem('worker', 'enabled',
1326 coreconfigitem('worker', 'enabled',
1324 default=True,
1327 default=True,
1325 )
1328 )
1326 coreconfigitem('worker', 'numcpus',
1329 coreconfigitem('worker', 'numcpus',
1327 default=None,
1330 default=None,
1328 )
1331 )
1329
1332
1330 # Rebase related configuration moved to core because other extension are doing
1333 # Rebase related configuration moved to core because other extension are doing
1331 # strange things. For example, shelve import the extensions to reuse some bit
1334 # strange things. For example, shelve import the extensions to reuse some bit
1332 # without formally loading it.
1335 # without formally loading it.
1333 coreconfigitem('commands', 'rebase.requiredest',
1336 coreconfigitem('commands', 'rebase.requiredest',
1334 default=False,
1337 default=False,
1335 )
1338 )
1336 coreconfigitem('experimental', 'rebaseskipobsolete',
1339 coreconfigitem('experimental', 'rebaseskipobsolete',
1337 default=True,
1340 default=True,
1338 )
1341 )
1339 coreconfigitem('rebase', 'singletransaction',
1342 coreconfigitem('rebase', 'singletransaction',
1340 default=False,
1343 default=False,
1341 )
1344 )
1342 coreconfigitem('rebase', 'experimental.inmemory',
1345 coreconfigitem('rebase', 'experimental.inmemory',
1343 default=False,
1346 default=False,
1344 )
1347 )
@@ -1,2378 +1,2381 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 )
23 )
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 branchmap,
26 branchmap,
27 bundle2,
27 bundle2,
28 changegroup,
28 changegroup,
29 changelog,
29 changelog,
30 color,
30 color,
31 context,
31 context,
32 dirstate,
32 dirstate,
33 dirstateguard,
33 dirstateguard,
34 discovery,
34 discovery,
35 encoding,
35 encoding,
36 error,
36 error,
37 exchange,
37 exchange,
38 extensions,
38 extensions,
39 filelog,
39 filelog,
40 hook,
40 hook,
41 lock as lockmod,
41 lock as lockmod,
42 manifest,
42 manifest,
43 match as matchmod,
43 match as matchmod,
44 merge as mergemod,
44 merge as mergemod,
45 mergeutil,
45 mergeutil,
46 namespaces,
46 namespaces,
47 narrowspec,
47 narrowspec,
48 obsolete,
48 obsolete,
49 pathutil,
49 pathutil,
50 phases,
50 phases,
51 pushkey,
51 pushkey,
52 pycompat,
52 pycompat,
53 repository,
53 repository,
54 repoview,
54 repoview,
55 revset,
55 revset,
56 revsetlang,
56 revsetlang,
57 scmutil,
57 scmutil,
58 sparse,
58 sparse,
59 store,
59 store,
60 subrepoutil,
60 subrepoutil,
61 tags as tagsmod,
61 tags as tagsmod,
62 transaction,
62 transaction,
63 txnutil,
63 txnutil,
64 util,
64 util,
65 vfs as vfsmod,
65 vfs as vfsmod,
66 )
66 )
67 from .utils import (
67 from .utils import (
68 interfaceutil,
68 interfaceutil,
69 procutil,
69 procutil,
70 stringutil,
70 stringutil,
71 )
71 )
72
72
73 release = lockmod.release
73 release = lockmod.release
74 urlerr = util.urlerr
74 urlerr = util.urlerr
75 urlreq = util.urlreq
75 urlreq = util.urlreq
76
76
77 # set of (path, vfs-location) tuples. vfs-location is:
77 # set of (path, vfs-location) tuples. vfs-location is:
78 # - 'plain for vfs relative paths
78 # - 'plain for vfs relative paths
79 # - '' for svfs relative paths
79 # - '' for svfs relative paths
80 _cachedfiles = set()
80 _cachedfiles = set()
81
81
82 class _basefilecache(scmutil.filecache):
82 class _basefilecache(scmutil.filecache):
83 """All filecache usage on repo are done for logic that should be unfiltered
83 """All filecache usage on repo are done for logic that should be unfiltered
84 """
84 """
85 def __get__(self, repo, type=None):
85 def __get__(self, repo, type=None):
86 if repo is None:
86 if repo is None:
87 return self
87 return self
88 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
88 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
89 def __set__(self, repo, value):
89 def __set__(self, repo, value):
90 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
90 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
91 def __delete__(self, repo):
91 def __delete__(self, repo):
92 return super(_basefilecache, self).__delete__(repo.unfiltered())
92 return super(_basefilecache, self).__delete__(repo.unfiltered())
93
93
94 class repofilecache(_basefilecache):
94 class repofilecache(_basefilecache):
95 """filecache for files in .hg but outside of .hg/store"""
95 """filecache for files in .hg but outside of .hg/store"""
96 def __init__(self, *paths):
96 def __init__(self, *paths):
97 super(repofilecache, self).__init__(*paths)
97 super(repofilecache, self).__init__(*paths)
98 for path in paths:
98 for path in paths:
99 _cachedfiles.add((path, 'plain'))
99 _cachedfiles.add((path, 'plain'))
100
100
101 def join(self, obj, fname):
101 def join(self, obj, fname):
102 return obj.vfs.join(fname)
102 return obj.vfs.join(fname)
103
103
104 class storecache(_basefilecache):
104 class storecache(_basefilecache):
105 """filecache for files in the store"""
105 """filecache for files in the store"""
106 def __init__(self, *paths):
106 def __init__(self, *paths):
107 super(storecache, self).__init__(*paths)
107 super(storecache, self).__init__(*paths)
108 for path in paths:
108 for path in paths:
109 _cachedfiles.add((path, ''))
109 _cachedfiles.add((path, ''))
110
110
111 def join(self, obj, fname):
111 def join(self, obj, fname):
112 return obj.sjoin(fname)
112 return obj.sjoin(fname)
113
113
114 def isfilecached(repo, name):
114 def isfilecached(repo, name):
115 """check if a repo has already cached "name" filecache-ed property
115 """check if a repo has already cached "name" filecache-ed property
116
116
117 This returns (cachedobj-or-None, iscached) tuple.
117 This returns (cachedobj-or-None, iscached) tuple.
118 """
118 """
119 cacheentry = repo.unfiltered()._filecache.get(name, None)
119 cacheentry = repo.unfiltered()._filecache.get(name, None)
120 if not cacheentry:
120 if not cacheentry:
121 return None, False
121 return None, False
122 return cacheentry.obj, True
122 return cacheentry.obj, True
123
123
124 class unfilteredpropertycache(util.propertycache):
124 class unfilteredpropertycache(util.propertycache):
125 """propertycache that apply to unfiltered repo only"""
125 """propertycache that apply to unfiltered repo only"""
126
126
127 def __get__(self, repo, type=None):
127 def __get__(self, repo, type=None):
128 unfi = repo.unfiltered()
128 unfi = repo.unfiltered()
129 if unfi is repo:
129 if unfi is repo:
130 return super(unfilteredpropertycache, self).__get__(unfi)
130 return super(unfilteredpropertycache, self).__get__(unfi)
131 return getattr(unfi, self.name)
131 return getattr(unfi, self.name)
132
132
133 class filteredpropertycache(util.propertycache):
133 class filteredpropertycache(util.propertycache):
134 """propertycache that must take filtering in account"""
134 """propertycache that must take filtering in account"""
135
135
136 def cachevalue(self, obj, value):
136 def cachevalue(self, obj, value):
137 object.__setattr__(obj, self.name, value)
137 object.__setattr__(obj, self.name, value)
138
138
139
139
140 def hasunfilteredcache(repo, name):
140 def hasunfilteredcache(repo, name):
141 """check if a repo has an unfilteredpropertycache value for <name>"""
141 """check if a repo has an unfilteredpropertycache value for <name>"""
142 return name in vars(repo.unfiltered())
142 return name in vars(repo.unfiltered())
143
143
144 def unfilteredmethod(orig):
144 def unfilteredmethod(orig):
145 """decorate method that always need to be run on unfiltered version"""
145 """decorate method that always need to be run on unfiltered version"""
146 def wrapper(repo, *args, **kwargs):
146 def wrapper(repo, *args, **kwargs):
147 return orig(repo.unfiltered(), *args, **kwargs)
147 return orig(repo.unfiltered(), *args, **kwargs)
148 return wrapper
148 return wrapper
149
149
150 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
150 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
151 'unbundle'}
151 'unbundle'}
152 legacycaps = moderncaps.union({'changegroupsubset'})
152 legacycaps = moderncaps.union({'changegroupsubset'})
153
153
154 @interfaceutil.implementer(repository.ipeercommandexecutor)
154 @interfaceutil.implementer(repository.ipeercommandexecutor)
155 class localcommandexecutor(object):
155 class localcommandexecutor(object):
156 def __init__(self, peer):
156 def __init__(self, peer):
157 self._peer = peer
157 self._peer = peer
158 self._sent = False
158 self._sent = False
159 self._closed = False
159 self._closed = False
160
160
161 def __enter__(self):
161 def __enter__(self):
162 return self
162 return self
163
163
164 def __exit__(self, exctype, excvalue, exctb):
164 def __exit__(self, exctype, excvalue, exctb):
165 self.close()
165 self.close()
166
166
167 def callcommand(self, command, args):
167 def callcommand(self, command, args):
168 if self._sent:
168 if self._sent:
169 raise error.ProgrammingError('callcommand() cannot be used after '
169 raise error.ProgrammingError('callcommand() cannot be used after '
170 'sendcommands()')
170 'sendcommands()')
171
171
172 if self._closed:
172 if self._closed:
173 raise error.ProgrammingError('callcommand() cannot be used after '
173 raise error.ProgrammingError('callcommand() cannot be used after '
174 'close()')
174 'close()')
175
175
176 # We don't need to support anything fancy. Just call the named
176 # We don't need to support anything fancy. Just call the named
177 # method on the peer and return a resolved future.
177 # method on the peer and return a resolved future.
178 fn = getattr(self._peer, pycompat.sysstr(command))
178 fn = getattr(self._peer, pycompat.sysstr(command))
179
179
180 f = pycompat.futures.Future()
180 f = pycompat.futures.Future()
181
181
182 try:
182 try:
183 result = fn(**pycompat.strkwargs(args))
183 result = fn(**pycompat.strkwargs(args))
184 except Exception:
184 except Exception:
185 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
185 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
186 else:
186 else:
187 f.set_result(result)
187 f.set_result(result)
188
188
189 return f
189 return f
190
190
191 def sendcommands(self):
191 def sendcommands(self):
192 self._sent = True
192 self._sent = True
193
193
194 def close(self):
194 def close(self):
195 self._closed = True
195 self._closed = True
196
196
197 @interfaceutil.implementer(repository.ipeercommands)
197 @interfaceutil.implementer(repository.ipeercommands)
198 class localpeer(repository.peer):
198 class localpeer(repository.peer):
199 '''peer for a local repo; reflects only the most recent API'''
199 '''peer for a local repo; reflects only the most recent API'''
200
200
201 def __init__(self, repo, caps=None):
201 def __init__(self, repo, caps=None):
202 super(localpeer, self).__init__()
202 super(localpeer, self).__init__()
203
203
204 if caps is None:
204 if caps is None:
205 caps = moderncaps.copy()
205 caps = moderncaps.copy()
206 self._repo = repo.filtered('served')
206 self._repo = repo.filtered('served')
207 self.ui = repo.ui
207 self.ui = repo.ui
208 self._caps = repo._restrictcapabilities(caps)
208 self._caps = repo._restrictcapabilities(caps)
209
209
210 # Begin of _basepeer interface.
210 # Begin of _basepeer interface.
211
211
212 def url(self):
212 def url(self):
213 return self._repo.url()
213 return self._repo.url()
214
214
215 def local(self):
215 def local(self):
216 return self._repo
216 return self._repo
217
217
218 def peer(self):
218 def peer(self):
219 return self
219 return self
220
220
221 def canpush(self):
221 def canpush(self):
222 return True
222 return True
223
223
224 def close(self):
224 def close(self):
225 self._repo.close()
225 self._repo.close()
226
226
227 # End of _basepeer interface.
227 # End of _basepeer interface.
228
228
229 # Begin of _basewirecommands interface.
229 # Begin of _basewirecommands interface.
230
230
231 def branchmap(self):
231 def branchmap(self):
232 return self._repo.branchmap()
232 return self._repo.branchmap()
233
233
234 def capabilities(self):
234 def capabilities(self):
235 return self._caps
235 return self._caps
236
236
237 def clonebundles(self):
237 def clonebundles(self):
238 return self._repo.tryread('clonebundles.manifest')
238 return self._repo.tryread('clonebundles.manifest')
239
239
240 def debugwireargs(self, one, two, three=None, four=None, five=None):
240 def debugwireargs(self, one, two, three=None, four=None, five=None):
241 """Used to test argument passing over the wire"""
241 """Used to test argument passing over the wire"""
242 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
242 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
243 pycompat.bytestr(four),
243 pycompat.bytestr(four),
244 pycompat.bytestr(five))
244 pycompat.bytestr(five))
245
245
246 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
246 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
247 **kwargs):
247 **kwargs):
248 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
248 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
249 common=common, bundlecaps=bundlecaps,
249 common=common, bundlecaps=bundlecaps,
250 **kwargs)[1]
250 **kwargs)[1]
251 cb = util.chunkbuffer(chunks)
251 cb = util.chunkbuffer(chunks)
252
252
253 if exchange.bundle2requested(bundlecaps):
253 if exchange.bundle2requested(bundlecaps):
254 # When requesting a bundle2, getbundle returns a stream to make the
254 # When requesting a bundle2, getbundle returns a stream to make the
255 # wire level function happier. We need to build a proper object
255 # wire level function happier. We need to build a proper object
256 # from it in local peer.
256 # from it in local peer.
257 return bundle2.getunbundler(self.ui, cb)
257 return bundle2.getunbundler(self.ui, cb)
258 else:
258 else:
259 return changegroup.getunbundler('01', cb, None)
259 return changegroup.getunbundler('01', cb, None)
260
260
261 def heads(self):
261 def heads(self):
262 return self._repo.heads()
262 return self._repo.heads()
263
263
264 def known(self, nodes):
264 def known(self, nodes):
265 return self._repo.known(nodes)
265 return self._repo.known(nodes)
266
266
267 def listkeys(self, namespace):
267 def listkeys(self, namespace):
268 return self._repo.listkeys(namespace)
268 return self._repo.listkeys(namespace)
269
269
270 def lookup(self, key):
270 def lookup(self, key):
271 return self._repo.lookup(key)
271 return self._repo.lookup(key)
272
272
273 def pushkey(self, namespace, key, old, new):
273 def pushkey(self, namespace, key, old, new):
274 return self._repo.pushkey(namespace, key, old, new)
274 return self._repo.pushkey(namespace, key, old, new)
275
275
276 def stream_out(self):
276 def stream_out(self):
277 raise error.Abort(_('cannot perform stream clone against local '
277 raise error.Abort(_('cannot perform stream clone against local '
278 'peer'))
278 'peer'))
279
279
280 def unbundle(self, bundle, heads, url):
280 def unbundle(self, bundle, heads, url):
281 """apply a bundle on a repo
281 """apply a bundle on a repo
282
282
283 This function handles the repo locking itself."""
283 This function handles the repo locking itself."""
284 try:
284 try:
285 try:
285 try:
286 bundle = exchange.readbundle(self.ui, bundle, None)
286 bundle = exchange.readbundle(self.ui, bundle, None)
287 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
287 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
288 if util.safehasattr(ret, 'getchunks'):
288 if util.safehasattr(ret, 'getchunks'):
289 # This is a bundle20 object, turn it into an unbundler.
289 # This is a bundle20 object, turn it into an unbundler.
290 # This little dance should be dropped eventually when the
290 # This little dance should be dropped eventually when the
291 # API is finally improved.
291 # API is finally improved.
292 stream = util.chunkbuffer(ret.getchunks())
292 stream = util.chunkbuffer(ret.getchunks())
293 ret = bundle2.getunbundler(self.ui, stream)
293 ret = bundle2.getunbundler(self.ui, stream)
294 return ret
294 return ret
295 except Exception as exc:
295 except Exception as exc:
296 # If the exception contains output salvaged from a bundle2
296 # If the exception contains output salvaged from a bundle2
297 # reply, we need to make sure it is printed before continuing
297 # reply, we need to make sure it is printed before continuing
298 # to fail. So we build a bundle2 with such output and consume
298 # to fail. So we build a bundle2 with such output and consume
299 # it directly.
299 # it directly.
300 #
300 #
301 # This is not very elegant but allows a "simple" solution for
301 # This is not very elegant but allows a "simple" solution for
302 # issue4594
302 # issue4594
303 output = getattr(exc, '_bundle2salvagedoutput', ())
303 output = getattr(exc, '_bundle2salvagedoutput', ())
304 if output:
304 if output:
305 bundler = bundle2.bundle20(self._repo.ui)
305 bundler = bundle2.bundle20(self._repo.ui)
306 for out in output:
306 for out in output:
307 bundler.addpart(out)
307 bundler.addpart(out)
308 stream = util.chunkbuffer(bundler.getchunks())
308 stream = util.chunkbuffer(bundler.getchunks())
309 b = bundle2.getunbundler(self.ui, stream)
309 b = bundle2.getunbundler(self.ui, stream)
310 bundle2.processbundle(self._repo, b)
310 bundle2.processbundle(self._repo, b)
311 raise
311 raise
312 except error.PushRaced as exc:
312 except error.PushRaced as exc:
313 raise error.ResponseError(_('push failed:'),
313 raise error.ResponseError(_('push failed:'),
314 stringutil.forcebytestr(exc))
314 stringutil.forcebytestr(exc))
315
315
316 # End of _basewirecommands interface.
316 # End of _basewirecommands interface.
317
317
318 # Begin of peer interface.
318 # Begin of peer interface.
319
319
320 def commandexecutor(self):
320 def commandexecutor(self):
321 return localcommandexecutor(self)
321 return localcommandexecutor(self)
322
322
323 # End of peer interface.
323 # End of peer interface.
324
324
325 @interfaceutil.implementer(repository.ipeerlegacycommands)
325 @interfaceutil.implementer(repository.ipeerlegacycommands)
326 class locallegacypeer(localpeer):
326 class locallegacypeer(localpeer):
327 '''peer extension which implements legacy methods too; used for tests with
327 '''peer extension which implements legacy methods too; used for tests with
328 restricted capabilities'''
328 restricted capabilities'''
329
329
330 def __init__(self, repo):
330 def __init__(self, repo):
331 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
331 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
332
332
333 # Begin of baselegacywirecommands interface.
333 # Begin of baselegacywirecommands interface.
334
334
335 def between(self, pairs):
335 def between(self, pairs):
336 return self._repo.between(pairs)
336 return self._repo.between(pairs)
337
337
338 def branches(self, nodes):
338 def branches(self, nodes):
339 return self._repo.branches(nodes)
339 return self._repo.branches(nodes)
340
340
341 def changegroup(self, nodes, source):
341 def changegroup(self, nodes, source):
342 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
342 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
343 missingheads=self._repo.heads())
343 missingheads=self._repo.heads())
344 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
344 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
345
345
346 def changegroupsubset(self, bases, heads, source):
346 def changegroupsubset(self, bases, heads, source):
347 outgoing = discovery.outgoing(self._repo, missingroots=bases,
347 outgoing = discovery.outgoing(self._repo, missingroots=bases,
348 missingheads=heads)
348 missingheads=heads)
349 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
349 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
350
350
351 # End of baselegacywirecommands interface.
351 # End of baselegacywirecommands interface.
352
352
353 # Increment the sub-version when the revlog v2 format changes to lock out old
353 # Increment the sub-version when the revlog v2 format changes to lock out old
354 # clients.
354 # clients.
355 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
355 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
356
356
357 # Functions receiving (ui, features) that extensions can register to impact
357 # Functions receiving (ui, features) that extensions can register to impact
358 # the ability to load repositories with custom requirements. Only
358 # the ability to load repositories with custom requirements. Only
359 # functions defined in loaded extensions are called.
359 # functions defined in loaded extensions are called.
360 #
360 #
361 # The function receives a set of requirement strings that the repository
361 # The function receives a set of requirement strings that the repository
362 # is capable of opening. Functions will typically add elements to the
362 # is capable of opening. Functions will typically add elements to the
363 # set to reflect that the extension knows how to handle that requirements.
363 # set to reflect that the extension knows how to handle that requirements.
364 featuresetupfuncs = set()
364 featuresetupfuncs = set()
365
365
366 @interfaceutil.implementer(repository.completelocalrepository)
366 @interfaceutil.implementer(repository.completelocalrepository)
367 class localrepository(object):
367 class localrepository(object):
368
368
369 # obsolete experimental requirements:
369 # obsolete experimental requirements:
370 # - manifestv2: An experimental new manifest format that allowed
370 # - manifestv2: An experimental new manifest format that allowed
371 # for stem compression of long paths. Experiment ended up not
371 # for stem compression of long paths. Experiment ended up not
372 # being successful (repository sizes went up due to worse delta
372 # being successful (repository sizes went up due to worse delta
373 # chains), and the code was deleted in 4.6.
373 # chains), and the code was deleted in 4.6.
374 supportedformats = {
374 supportedformats = {
375 'revlogv1',
375 'revlogv1',
376 'generaldelta',
376 'generaldelta',
377 'treemanifest',
377 'treemanifest',
378 REVLOGV2_REQUIREMENT,
378 REVLOGV2_REQUIREMENT,
379 }
379 }
380 _basesupported = supportedformats | {
380 _basesupported = supportedformats | {
381 'store',
381 'store',
382 'fncache',
382 'fncache',
383 'shared',
383 'shared',
384 'relshared',
384 'relshared',
385 'dotencode',
385 'dotencode',
386 'exp-sparse',
386 'exp-sparse',
387 }
387 }
388 openerreqs = {
388 openerreqs = {
389 'revlogv1',
389 'revlogv1',
390 'generaldelta',
390 'generaldelta',
391 'treemanifest',
391 'treemanifest',
392 }
392 }
393
393
394 # list of prefix for file which can be written without 'wlock'
394 # list of prefix for file which can be written without 'wlock'
395 # Extensions should extend this list when needed
395 # Extensions should extend this list when needed
396 _wlockfreeprefix = {
396 _wlockfreeprefix = {
397 # We migh consider requiring 'wlock' for the next
397 # We migh consider requiring 'wlock' for the next
398 # two, but pretty much all the existing code assume
398 # two, but pretty much all the existing code assume
399 # wlock is not needed so we keep them excluded for
399 # wlock is not needed so we keep them excluded for
400 # now.
400 # now.
401 'hgrc',
401 'hgrc',
402 'requires',
402 'requires',
403 # XXX cache is a complicatged business someone
403 # XXX cache is a complicatged business someone
404 # should investigate this in depth at some point
404 # should investigate this in depth at some point
405 'cache/',
405 'cache/',
406 # XXX shouldn't be dirstate covered by the wlock?
406 # XXX shouldn't be dirstate covered by the wlock?
407 'dirstate',
407 'dirstate',
408 # XXX bisect was still a bit too messy at the time
408 # XXX bisect was still a bit too messy at the time
409 # this changeset was introduced. Someone should fix
409 # this changeset was introduced. Someone should fix
410 # the remainig bit and drop this line
410 # the remainig bit and drop this line
411 'bisect.state',
411 'bisect.state',
412 }
412 }
413
413
414 def __init__(self, baseui, path, create=False, intents=None):
414 def __init__(self, baseui, path, create=False, intents=None):
415 self.requirements = set()
415 self.requirements = set()
416 self.filtername = None
416 self.filtername = None
417 # wvfs: rooted at the repository root, used to access the working copy
417 # wvfs: rooted at the repository root, used to access the working copy
418 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
418 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
419 # vfs: rooted at .hg, used to access repo files outside of .hg/store
419 # vfs: rooted at .hg, used to access repo files outside of .hg/store
420 self.vfs = None
420 self.vfs = None
421 # svfs: usually rooted at .hg/store, used to access repository history
421 # svfs: usually rooted at .hg/store, used to access repository history
422 # If this is a shared repository, this vfs may point to another
422 # If this is a shared repository, this vfs may point to another
423 # repository's .hg/store directory.
423 # repository's .hg/store directory.
424 self.svfs = None
424 self.svfs = None
425 self.root = self.wvfs.base
425 self.root = self.wvfs.base
426 self.path = self.wvfs.join(".hg")
426 self.path = self.wvfs.join(".hg")
427 self.origroot = path
427 self.origroot = path
428 # This is only used by context.workingctx.match in order to
428 # This is only used by context.workingctx.match in order to
429 # detect files in subrepos.
429 # detect files in subrepos.
430 self.auditor = pathutil.pathauditor(
430 self.auditor = pathutil.pathauditor(
431 self.root, callback=self._checknested)
431 self.root, callback=self._checknested)
432 # This is only used by context.basectx.match in order to detect
432 # This is only used by context.basectx.match in order to detect
433 # files in subrepos.
433 # files in subrepos.
434 self.nofsauditor = pathutil.pathauditor(
434 self.nofsauditor = pathutil.pathauditor(
435 self.root, callback=self._checknested, realfs=False, cached=True)
435 self.root, callback=self._checknested, realfs=False, cached=True)
436 self.baseui = baseui
436 self.baseui = baseui
437 self.ui = baseui.copy()
437 self.ui = baseui.copy()
438 self.ui.copy = baseui.copy # prevent copying repo configuration
438 self.ui.copy = baseui.copy # prevent copying repo configuration
439 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
439 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
440 if (self.ui.configbool('devel', 'all-warnings') or
440 if (self.ui.configbool('devel', 'all-warnings') or
441 self.ui.configbool('devel', 'check-locks')):
441 self.ui.configbool('devel', 'check-locks')):
442 self.vfs.audit = self._getvfsward(self.vfs.audit)
442 self.vfs.audit = self._getvfsward(self.vfs.audit)
443 # A list of callback to shape the phase if no data were found.
443 # A list of callback to shape the phase if no data were found.
444 # Callback are in the form: func(repo, roots) --> processed root.
444 # Callback are in the form: func(repo, roots) --> processed root.
445 # This list it to be filled by extension during repo setup
445 # This list it to be filled by extension during repo setup
446 self._phasedefaults = []
446 self._phasedefaults = []
447 try:
447 try:
448 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
448 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
449 self._loadextensions()
449 self._loadextensions()
450 except IOError:
450 except IOError:
451 pass
451 pass
452
452
453 if featuresetupfuncs:
453 if featuresetupfuncs:
454 self.supported = set(self._basesupported) # use private copy
454 self.supported = set(self._basesupported) # use private copy
455 extmods = set(m.__name__ for n, m
455 extmods = set(m.__name__ for n, m
456 in extensions.extensions(self.ui))
456 in extensions.extensions(self.ui))
457 for setupfunc in featuresetupfuncs:
457 for setupfunc in featuresetupfuncs:
458 if setupfunc.__module__ in extmods:
458 if setupfunc.__module__ in extmods:
459 setupfunc(self.ui, self.supported)
459 setupfunc(self.ui, self.supported)
460 else:
460 else:
461 self.supported = self._basesupported
461 self.supported = self._basesupported
462 color.setup(self.ui)
462 color.setup(self.ui)
463
463
464 # Add compression engines.
464 # Add compression engines.
465 for name in util.compengines:
465 for name in util.compengines:
466 engine = util.compengines[name]
466 engine = util.compengines[name]
467 if engine.revlogheader():
467 if engine.revlogheader():
468 self.supported.add('exp-compression-%s' % name)
468 self.supported.add('exp-compression-%s' % name)
469
469
470 if not self.vfs.isdir():
470 if not self.vfs.isdir():
471 if create:
471 if create:
472 self.requirements = newreporequirements(self)
472 self.requirements = newreporequirements(self)
473
473
474 if not self.wvfs.exists():
474 if not self.wvfs.exists():
475 self.wvfs.makedirs()
475 self.wvfs.makedirs()
476 self.vfs.makedir(notindexed=True)
476 self.vfs.makedir(notindexed=True)
477
477
478 if 'store' in self.requirements:
478 if 'store' in self.requirements:
479 self.vfs.mkdir("store")
479 self.vfs.mkdir("store")
480
480
481 # create an invalid changelog
481 # create an invalid changelog
482 self.vfs.append(
482 self.vfs.append(
483 "00changelog.i",
483 "00changelog.i",
484 '\0\0\0\2' # represents revlogv2
484 '\0\0\0\2' # represents revlogv2
485 ' dummy changelog to prevent using the old repo layout'
485 ' dummy changelog to prevent using the old repo layout'
486 )
486 )
487 else:
487 else:
488 raise error.RepoError(_("repository %s not found") % path)
488 raise error.RepoError(_("repository %s not found") % path)
489 elif create:
489 elif create:
490 raise error.RepoError(_("repository %s already exists") % path)
490 raise error.RepoError(_("repository %s already exists") % path)
491 else:
491 else:
492 try:
492 try:
493 self.requirements = scmutil.readrequires(
493 self.requirements = scmutil.readrequires(
494 self.vfs, self.supported)
494 self.vfs, self.supported)
495 except IOError as inst:
495 except IOError as inst:
496 if inst.errno != errno.ENOENT:
496 if inst.errno != errno.ENOENT:
497 raise
497 raise
498
498
499 cachepath = self.vfs.join('cache')
499 cachepath = self.vfs.join('cache')
500 self.sharedpath = self.path
500 self.sharedpath = self.path
501 try:
501 try:
502 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
502 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
503 if 'relshared' in self.requirements:
503 if 'relshared' in self.requirements:
504 sharedpath = self.vfs.join(sharedpath)
504 sharedpath = self.vfs.join(sharedpath)
505 vfs = vfsmod.vfs(sharedpath, realpath=True)
505 vfs = vfsmod.vfs(sharedpath, realpath=True)
506 cachepath = vfs.join('cache')
506 cachepath = vfs.join('cache')
507 s = vfs.base
507 s = vfs.base
508 if not vfs.exists():
508 if not vfs.exists():
509 raise error.RepoError(
509 raise error.RepoError(
510 _('.hg/sharedpath points to nonexistent directory %s') % s)
510 _('.hg/sharedpath points to nonexistent directory %s') % s)
511 self.sharedpath = s
511 self.sharedpath = s
512 except IOError as inst:
512 except IOError as inst:
513 if inst.errno != errno.ENOENT:
513 if inst.errno != errno.ENOENT:
514 raise
514 raise
515
515
516 if 'exp-sparse' in self.requirements and not sparse.enabled:
516 if 'exp-sparse' in self.requirements and not sparse.enabled:
517 raise error.RepoError(_('repository is using sparse feature but '
517 raise error.RepoError(_('repository is using sparse feature but '
518 'sparse is not enabled; enable the '
518 'sparse is not enabled; enable the '
519 '"sparse" extensions to access'))
519 '"sparse" extensions to access'))
520
520
521 self.store = store.store(
521 self.store = store.store(
522 self.requirements, self.sharedpath,
522 self.requirements, self.sharedpath,
523 lambda base: vfsmod.vfs(base, cacheaudited=True))
523 lambda base: vfsmod.vfs(base, cacheaudited=True))
524 self.spath = self.store.path
524 self.spath = self.store.path
525 self.svfs = self.store.vfs
525 self.svfs = self.store.vfs
526 self.sjoin = self.store.join
526 self.sjoin = self.store.join
527 self.vfs.createmode = self.store.createmode
527 self.vfs.createmode = self.store.createmode
528 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
528 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
529 self.cachevfs.createmode = self.store.createmode
529 self.cachevfs.createmode = self.store.createmode
530 if (self.ui.configbool('devel', 'all-warnings') or
530 if (self.ui.configbool('devel', 'all-warnings') or
531 self.ui.configbool('devel', 'check-locks')):
531 self.ui.configbool('devel', 'check-locks')):
532 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
532 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
533 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
533 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
534 else: # standard vfs
534 else: # standard vfs
535 self.svfs.audit = self._getsvfsward(self.svfs.audit)
535 self.svfs.audit = self._getsvfsward(self.svfs.audit)
536 self._applyopenerreqs()
536 self._applyopenerreqs()
537 if create:
537 if create:
538 self._writerequirements()
538 self._writerequirements()
539
539
540 self._dirstatevalidatewarned = False
540 self._dirstatevalidatewarned = False
541
541
542 self._branchcaches = {}
542 self._branchcaches = {}
543 self._revbranchcache = None
543 self._revbranchcache = None
544 self._filterpats = {}
544 self._filterpats = {}
545 self._datafilters = {}
545 self._datafilters = {}
546 self._transref = self._lockref = self._wlockref = None
546 self._transref = self._lockref = self._wlockref = None
547
547
548 # A cache for various files under .hg/ that tracks file changes,
548 # A cache for various files under .hg/ that tracks file changes,
549 # (used by the filecache decorator)
549 # (used by the filecache decorator)
550 #
550 #
551 # Maps a property name to its util.filecacheentry
551 # Maps a property name to its util.filecacheentry
552 self._filecache = {}
552 self._filecache = {}
553
553
554 # hold sets of revision to be filtered
554 # hold sets of revision to be filtered
555 # should be cleared when something might have changed the filter value:
555 # should be cleared when something might have changed the filter value:
556 # - new changesets,
556 # - new changesets,
557 # - phase change,
557 # - phase change,
558 # - new obsolescence marker,
558 # - new obsolescence marker,
559 # - working directory parent change,
559 # - working directory parent change,
560 # - bookmark changes
560 # - bookmark changes
561 self.filteredrevcache = {}
561 self.filteredrevcache = {}
562
562
563 # post-dirstate-status hooks
563 # post-dirstate-status hooks
564 self._postdsstatus = []
564 self._postdsstatus = []
565
565
566 # generic mapping between names and nodes
566 # generic mapping between names and nodes
567 self.names = namespaces.namespaces()
567 self.names = namespaces.namespaces()
568
568
569 # Key to signature value.
569 # Key to signature value.
570 self._sparsesignaturecache = {}
570 self._sparsesignaturecache = {}
571 # Signature to cached matcher instance.
571 # Signature to cached matcher instance.
572 self._sparsematchercache = {}
572 self._sparsematchercache = {}
573
573
def _getvfsward(self, origfunc):
    """build a ward for self.vfs

    Wraps the vfs ``audit`` callable (``origfunc``) so that every write
    access through self.vfs emits a devel warning when the expected lock
    is not held (lock for journal files, wlock for everything else), or
    when cache files are touched without going through cachevfs.
    """
    # hold the repo only weakly so the installed ward does not keep the
    # repository object alive
    rref = weakref.ref(self)
    def checkvfs(path, mode=None):
        ret = origfunc(path, mode=mode)
        repo = rref()
        if (repo is None
            or not util.safehasattr(repo, '_wlockref')
            or not util.safehasattr(repo, '_lockref')):
            return
        if mode in (None, 'r', 'rb'):
            return
        if path.startswith(repo.path):
            # truncate name relative to the repository (.hg)
            path = path[len(repo.path) + 1:]
        if path.startswith('cache/'):
            msg = 'accessing cache with vfs instead of cachevfs: "%s"'
            repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
        if path.startswith('journal.'):
            # journal is covered by 'lock'
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
        elif repo._currentlock(repo._wlockref) is None:
            # rest of vfs files are covered by 'wlock'
            #
            # exclude special files
            #
            # read the prefixes from 'repo' (the weakref dereference),
            # not 'self': capturing 'self' in this closure would hold a
            # strong reference to the repository and defeat the weakref
            # above.
            for prefix in repo._wlockfreeprefix:
                if path.startswith(prefix):
                    return
            repo.ui.develwarn('write with no wlock: "%s"' % path,
                              stacklevel=2, config='check-locks')
        return ret
    return checkvfs
608
608
def _getsvfsward(self, origfunc):
    """build a ward for self.svfs

    Wraps the store vfs ``audit`` callable so that any write access to
    store files emits a devel warning when the repo lock is not held.
    """
    # weakref so the ward does not keep the repository object alive
    rref = weakref.ref(self)
    def checksvfs(path, mode=None):
        ret = origfunc(path, mode=mode)
        repo = rref()
        # repo may be gone (garbage collected) or not fully constructed
        if repo is None or not util.safehasattr(repo, '_lockref'):
            return
        # read-only access needs no lock
        if mode in (None, 'r', 'rb'):
            return
        if path.startswith(repo.sharedpath):
            # truncate name relative to the repository (.hg)
            path = path[len(repo.sharedpath) + 1:]
        if repo._currentlock(repo._lockref) is None:
            repo.ui.develwarn('write with no lock: "%s"' % path,
                              stacklevel=3)
        return ret
    return checksvfs
627
627
def close(self):
    """Close the repository; currently only flushes pending cache writes."""
    self._writecaches()
630
630
def _loadextensions(self):
    """Load all extensions enabled for this repository's ui."""
    extensions.loadall(self.ui)
633
633
def _writecaches(self):
    """Persist in-memory caches (the rev-branch cache) to disk."""
    rbc = self._revbranchcache
    if rbc:
        rbc.write()
637
637
def _restrictcapabilities(self, caps):
    """Adjust the capability set advertised to peers.

    When bundle2 advertising is enabled, adds the encoded bundle2
    capability blob to ``caps``; otherwise returns ``caps`` unchanged.
    """
    if self.ui.configbool('experimental', 'bundle2-advertise'):
        # copy before mutating: the caller's set must not be modified
        caps = set(caps)
        capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                          role='client'))
        caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps
645
645
def _applyopenerreqs(self):
    """Derive ``self.svfs.options`` from repo requirements and config.

    These options tune store/revlog behavior: cache sizes, delta
    strategies, sparse-read parameters, compression engine and the
    revlog version.
    """
    self.svfs.options = dict((r, 1) for r in self.requirements
                             if r in self.openerreqs)
    # experimental config: format.chunkcachesize
    chunkcachesize = self.ui.configint('format', 'chunkcachesize')
    if chunkcachesize is not None:
        self.svfs.options['chunkcachesize'] = chunkcachesize
    # experimental config: format.maxchainlen
    maxchainlen = self.ui.configint('format', 'maxchainlen')
    if maxchainlen is not None:
        self.svfs.options['maxchainlen'] = maxchainlen
    # experimental config: format.manifestcachesize
    manifestcachesize = self.ui.configint('format', 'manifestcachesize')
    if manifestcachesize is not None:
        self.svfs.options['manifestcachesize'] = manifestcachesize
    # experimental config: format.aggressivemergedeltas
    aggressivemergedeltas = self.ui.configbool('format',
                                               'aggressivemergedeltas')
    self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
    self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
    chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
    # negative value disables the span limit
    if 0 <= chainspan:
        self.svfs.options['maxdeltachainspan'] = chainspan
    mmapindexthreshold = self.ui.configbytes('experimental',
                                             'mmapindexthreshold')
    if mmapindexthreshold is not None:
        self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
    withsparseread = self.ui.configbool('experimental', 'sparse-read')
    srdensitythres = float(self.ui.config('experimental',
                                          'sparse-read.density-threshold'))
    srmingapsize = self.ui.configbytes('experimental',
                                       'sparse-read.min-gap-size')
    self.svfs.options['with-sparse-read'] = withsparseread
    self.svfs.options['sparse-read-density-threshold'] = srdensitythres
    self.svfs.options['sparse-read-min-gap-size'] = srmingapsize

    # pick the compression engine from an exp-compression-* requirement
    for r in self.requirements:
        if r.startswith('exp-compression-'):
            self.svfs.options['compengine'] = r[len('exp-compression-'):]

    # TODO move "revlogv2" to openerreqs once finalized.
    if REVLOGV2_REQUIREMENT in self.requirements:
        self.svfs.options['revlogv2'] = True
689
689
def _writerequirements(self):
    """Persist ``self.requirements`` to the repo's requires file."""
    scmutil.writerequires(self.vfs, self.requirements)
692
692
def _checknested(self, path):
    """Determine if path is a legal nested repository.

    ``path`` must be an absolute path below ``self.root``; the check
    walks the working copy's subrepo state, recursing into subrepos.
    """
    if not path.startswith(self.root):
        return False
    subpath = path[len(self.root) + 1:]
    normsubpath = util.pconvert(subpath)

    # XXX: Checking against the current working copy is wrong in
    # the sense that it can reject things like
    #
    #   $ hg cat -r 10 sub/x.txt
    #
    # if sub/ is no longer a subrepository in the working copy
    # parent revision.
    #
    # However, it can of course also allow things that would have
    # been rejected before, such as the above cat command if sub/
    # is a subrepository now, but was a normal directory before.
    # The old path auditor would have rejected by mistake since it
    # panics when it sees sub/.hg/.
    #
    # All in all, checking against the working copy seems sensible
    # since we want to prevent access to nested repositories on
    # the filesystem *now*.
    ctx = self[None]
    parts = util.splitpath(subpath)
    while parts:
        prefix = '/'.join(parts)
        if prefix in ctx.substate:
            if prefix == normsubpath:
                # path is a subrepo of this repository: legal
                return True
            else:
                # path is deeper inside a subrepo: delegate the check
                sub = ctx.sub(prefix)
                return sub.checknested(subpath[len(prefix) + 1:])
        else:
            # not a subrepo at this depth; retry with the parent dir
            parts.pop()
    return False
730
730
def peer(self):
    """Return a peer object wrapping this local repository."""
    return localpeer(self) # not cached to avoid reference cycle
733
733
def unfiltered(self):
    """Return the unfiltered variant of this repository.

    The base repository is already unfiltered, so this simply returns
    ``self``; filtered repo views are expected to override it.
    """
    return self
739
739
def filtered(self, name, visibilityexceptions=None):
    """Return a filtered version of a repository

    ``name`` is the repoview filter to apply; ``visibilityexceptions``
    is forwarded unchanged to the view's constructor.
    """
    # build a repoview subclass of the unfiltered class and instantiate it
    cls = repoview.newtype(self.unfiltered().__class__)
    return cls(self, name, visibilityexceptions)
744
744
@repofilecache('bookmarks', 'bookmarks.current')
def _bookmarks(self):
    """The bookmark store, invalidated when bookmark files change."""
    return bookmarks.bmstore(self)
748
748
@property
def _activebookmark(self):
    # convenience accessor for the bookmark store's active bookmark
    return self._bookmarks.active
752
752
# _phasesets depend on changelog. what we need is to call
# _phasecache.invalidate() if '00changelog.i' was changed, but it
# can't be easily expressed in filecache mechanism.
@storecache('phaseroots', '00changelog.i')
def _phasecache(self):
    """Phase information (public/draft/secret) for this repository."""
    return phases.phasecache(self, self._phasedefaults)
759
759
@storecache('obsstore')
def obsstore(self):
    """The obsolescence-marker store for this repository."""
    return obsolete.makestore(self.ui, self)
763
763
@storecache('00changelog.i')
def changelog(self):
    """The changelog, honoring a pending transaction when applicable."""
    return changelog.changelog(self.svfs,
                               trypending=txnutil.mayhavepending(self.root))
768
768
def _constructmanifest(self):
    """Build the raw manifest revlog.

    This is a temporary function while we migrate from manifest to
    manifestlog. It allows bundlerepo and unionrepo to intercept the
    manifest creation.
    """
    return manifest.manifestrevlog(self.svfs)
774
774
@storecache('00manifest.i')
def manifestlog(self):
    """Collection-level access to manifests (see manifest.manifestlog)."""
    return manifest.manifestlog(self.svfs, self)
778
778
@repofilecache('dirstate')
def dirstate(self):
    """The dirstate object tracking the working directory."""
    # wrapped in a callable so the sparse matcher is resolved lazily
    sparsematchfn = lambda: sparse.matcher(self)

    return dirstate.dirstate(self.vfs, self.ui, self.root,
                             self._dirstatevalidate, sparsematchfn)
785
785
def _dirstatevalidate(self, node):
    """Validate a dirstate parent ``node``.

    Returns ``node`` when it is a known changeset; otherwise warns
    (once per repo object) and falls back to ``nullid``.
    """
    try:
        self.changelog.rev(node)
        return node
    except error.LookupError:
        if not self._dirstatevalidatewarned:
            # only emit the warning once to avoid spamming the user
            self._dirstatevalidatewarned = True
            self.ui.warn(_("warning: ignoring unknown"
                           " working parent %s!\n") % short(node))
        return nullid
796
796
@repofilecache(narrowspec.FILENAME)
def narrowpats(self):
    """matcher patterns for this repository's narrowspec

    A tuple of (includes, excludes).
    """
    source = self
    if self.shared():
        # for a shared repo, the narrowspec lives in the share source
        from . import hg
        source = hg.sharedreposource(self)
    return narrowspec.load(source)
808
808
@repofilecache(narrowspec.FILENAME)
def _narrowmatch(self):
    """Matcher limiting files to the narrowspec (always() if not narrow)."""
    # non-narrow repos match everything
    if changegroup.NARROW_REQUIREMENT not in self.requirements:
        return matchmod.always(self.root, '')
    include, exclude = self.narrowpats
    return narrowspec.match(self.root, include=include, exclude=exclude)
815
815
# TODO(martinvonz): make this property-like instead?
def narrowmatch(self):
    """Return the matcher restricting this repo to its narrowspec."""
    return self._narrowmatch
819
819
def setnarrowpats(self, newincludes, newexcludes):
    """Persist new narrowspec patterns and drop stale caches."""
    target = self
    if self.shared():
        # for a shared repo, the narrowspec lives in the share source
        from . import hg
        target = hg.sharedreposource(self)
    narrowspec.save(target, newincludes, newexcludes)
    # cached matchers/patterns are now stale
    self.invalidate(clearfilecache=True)
827
827
def __getitem__(self, changeid):
    """Return the context for ``changeid``.

    ``None`` yields the working context; an existing basectx is
    returned as-is; a slice yields a list of changectx (skipping
    filtered revisions); anything else is looked up as a changeset.
    """
    if changeid is None:
        return context.workingctx(self)
    if isinstance(changeid, context.basectx):
        return changeid
    if isinstance(changeid, slice):
        # wdirrev isn't contiguous so the slice shouldn't include it
        return [context.changectx(self, i)
                for i in xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs]
    try:
        return context.changectx(self, changeid)
    except error.WdirUnsupported:
        # lookup resolved to the working directory revision
        return context.workingctx(self)
842
842
def __contains__(self, changeid):
    """True if the given changeid exists

    error.LookupError is raised if an ambiguous node specified.
    """
    # membership is simply "does lookup succeed"
    try:
        self[changeid]
    except error.RepoLookupError:
        return False
    return True
853
853
def __nonzero__(self):
    # a repository object is always "truthy", even with no revisions
    return True

# Python 3 spelling of the truthiness protocol
__bool__ = __nonzero__
858
858
def __len__(self):
    """Number of revisions in the (unfiltered) changelog."""
    # no need to pay the cost of repoview.changelog
    unfi = self.unfiltered()
    return len(unfi.changelog)
863
863
def __iter__(self):
    """Iterate over the repository by delegating to the changelog."""
    return iter(self.changelog)
866
866
def revs(self, expr, *args):
    '''Find revisions matching a revset.

    The revset is specified as a string ``expr`` that may contain
    %-formatting to escape certain types. See ``revsetlang.formatspec``.

    Revset aliases from the configuration are not expanded. To expand
    user aliases, consider calling ``scmutil.revrange()`` or
    ``repo.anyrevs([expr], user=True)``.

    Returns a revset.abstractsmartset, which is a list-like interface
    that contains integer revisions.
    '''
    expr = revsetlang.formatspec(expr, *args)
    # ui=None: revset aliases are deliberately not expanded here
    m = revset.match(None, expr)
    return m(self)
883
883
def set(self, expr, *args):
    '''Find revisions matching a revset and emit changectx instances.

    A generator convenience wrapper around ``revs()``: each matching
    revision is yielded as its changectx.

    Revset aliases from the configuration are not expanded. To expand
    user aliases, consider calling ``scmutil.revrange()``.
    '''
    for rev in self.revs(expr, *args):
        yield self[rev]
895
895
def anyrevs(self, specs, user=False, localalias=None):
    '''Find revisions matching one of the given revsets.

    Revset aliases from the configuration are not expanded by default. To
    expand user aliases, specify ``user=True``. To provide some local
    definitions overriding user aliases, set ``localalias`` to
    ``{name: definitionstring}``.
    '''
    if user:
        m = revset.matchany(self.ui, specs,
                            lookup=revset.lookupfn(self),
                            localalias=localalias)
    else:
        # no ui: user revset aliases are not expanded
        m = revset.matchany(None, specs, localalias=localalias)
    return m(self)
911
911
def url(self):
    """Return this repository's location as a 'file:' URL string."""
    return 'file:%s' % self.root
914
914
def hook(self, name, throw=False, **args):
    """Call a hook, passing this repo instance.

    This a convenience method to aid invoking hooks. Extensions likely
    won't call this unless they have registered a custom hook or are
    replacing code that is expected to call a hook.

    ``throw`` and ``args`` are forwarded verbatim to ``hook.hook``;
    see that function for their exact semantics.
    """
    return hook.hook(self.ui, self, name, throw, **args)
923
923
@filteredpropertycache
def _tagscache(self):
    '''Returns a tagscache object that contains various tags related
    caches.'''

    # This simplifies its cache management by having one decorated
    # function (this one) and the rest simply fetch things from it.
    class tagscache(object):
        def __init__(self):
            # These two define the set of tags for this repository. tags
            # maps tag name to node; tagtypes maps tag name to 'global' or
            # 'local'. (Global tags are defined by .hgtags across all
            # heads, and local tags are defined in .hg/localtags.)
            # They constitute the in-memory cache of tags.
            self.tags = self.tagtypes = None

            # filled lazily by tagslist() and nodetags() respectively
            self.nodetagscache = self.tagslist = None

    cache = tagscache()
    cache.tags, cache.tagtypes = self._findtags()

    return cache
946
946
def tags(self):
    '''return a mapping of tag to node'''
    t = {}
    if self.changelog.filteredrevs:
        # NOTE(review): appears the cache is only valid for the
        # unfiltered repo, so recompute when revisions are filtered
        # out — confirm against _tagscache invalidation rules
        tags, tt = self._findtags()
    else:
        tags = self._tagscache.tags
    for k, v in tags.iteritems():
        try:
            # ignore tags to unknown nodes
            self.changelog.rev(v)
            t[k] = v
        except (error.LookupError, ValueError):
            pass
    return t
962
962
def _findtags(self):
    '''Do the hard work of finding tags. Return a pair of dicts
    (tags, tagtypes) where tags maps tag name to node, and tagtypes
    maps tag name to a string like \'global\' or \'local\'.
    Subclasses or extensions are free to add their own tags, but
    should be aware that the returned dicts will be retained for the
    duration of the localrepo object.'''

    # XXX what tagtype should subclasses/extensions use? Currently
    # mq and bookmarks add tags, but do not set the tagtype at all.
    # Should each extension invent its own tag type? Should there
    # be one tagtype for all such "virtual" tags? Or is the status
    # quo fine?


    # map tag name to (node, hist)
    alltags = tagsmod.findglobaltags(self.ui, self)
    # map tag name to tag type
    tagtypes = dict((tag, 'global') for tag in alltags)

    # local tags override/augment the global ones in place
    tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

    # Build the return dicts. Have to re-encode tag names because
    # the tags module always uses UTF-8 (in order not to lose info
    # writing to the cache), but the rest of Mercurial wants them in
    # local encoding.
    tags = {}
    for (name, (node, hist)) in alltags.iteritems():
        # 'hist' is ignored here; only the node is needed
        if node != nullid:
            tags[encoding.tolocal(name)] = node
    tags['tip'] = self.changelog.tip()
    tagtypes = dict([(encoding.tolocal(name), value)
                     for (name, value) in tagtypes.iteritems()])
    return (tags, tagtypes)
997
997
998 def tagtype(self, tagname):
998 def tagtype(self, tagname):
999 '''
999 '''
1000 return the type of the given tag. result can be:
1000 return the type of the given tag. result can be:
1001
1001
1002 'local' : a local tag
1002 'local' : a local tag
1003 'global' : a global tag
1003 'global' : a global tag
1004 None : tag does not exist
1004 None : tag does not exist
1005 '''
1005 '''
1006
1006
1007 return self._tagscache.tagtypes.get(tagname)
1007 return self._tagscache.tagtypes.get(tagname)
1008
1008
1009 def tagslist(self):
1009 def tagslist(self):
1010 '''return a list of tags ordered by revision'''
1010 '''return a list of tags ordered by revision'''
1011 if not self._tagscache.tagslist:
1011 if not self._tagscache.tagslist:
1012 l = []
1012 l = []
1013 for t, n in self.tags().iteritems():
1013 for t, n in self.tags().iteritems():
1014 l.append((self.changelog.rev(n), t, n))
1014 l.append((self.changelog.rev(n), t, n))
1015 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1015 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1016
1016
1017 return self._tagscache.tagslist
1017 return self._tagscache.tagslist
1018
1018
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # lazily invert the name->node tag table into node->[names];
            # the result is memoized on _tagscache
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            # sort each tag list so callers see a deterministic order
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        # nodes without tags get an empty list
        return self._tagscache.nodetagscache.get(node, [])
1029
1029
1030 def nodebookmarks(self, node):
1030 def nodebookmarks(self, node):
1031 """return the list of bookmarks pointing to the specified node"""
1031 """return the list of bookmarks pointing to the specified node"""
1032 marks = []
1032 marks = []
1033 for bookmark, n in self._bookmarks.iteritems():
1033 for bookmark, n in self._bookmarks.iteritems():
1034 if n == node:
1034 if n == node:
1035 marks.append(bookmark)
1035 marks.append(bookmark)
1036 return sorted(marks)
1036 return sorted(marks)
1037
1037
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # NOTE: inside this body 'branchmap' resolves to the branchmap
        # *module*; the method name only shadows it as a class attribute.
        # updatecache refreshes the per-filter cache before we read it.
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
1043
1043
    @unfilteredmethod
    def revbranchcache(self):
        # Lazily create the rev -> branch cache. It is built on the
        # unfiltered repo so every filtered view shares one instance.
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
1049
1049
1050 def branchtip(self, branch, ignoremissing=False):
1050 def branchtip(self, branch, ignoremissing=False):
1051 '''return the tip node for a given branch
1051 '''return the tip node for a given branch
1052
1052
1053 If ignoremissing is True, then this method will not raise an error.
1053 If ignoremissing is True, then this method will not raise an error.
1054 This is helpful for callers that only expect None for a missing branch
1054 This is helpful for callers that only expect None for a missing branch
1055 (e.g. namespace).
1055 (e.g. namespace).
1056
1056
1057 '''
1057 '''
1058 try:
1058 try:
1059 return self.branchmap().branchtip(branch)
1059 return self.branchmap().branchtip(branch)
1060 except KeyError:
1060 except KeyError:
1061 if not ignoremissing:
1061 if not ignoremissing:
1062 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1062 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1063 else:
1063 else:
1064 pass
1064 pass
1065
1065
    def lookup(self, key):
        # resolve a revision symbol (rev number, hash prefix, tag, bookmark,
        # ...) to its binary changelog node id
        return scmutil.revsymbol(self, key).node()
1068
1068
1069 def lookupbranch(self, key):
1069 def lookupbranch(self, key):
1070 if key in self.branchmap():
1070 if key in self.branchmap():
1071 return key
1071 return key
1072
1072
1073 return scmutil.revsymbol(self, key).branch()
1073 return scmutil.revsymbol(self, key).branch()
1074
1074
1075 def known(self, nodes):
1075 def known(self, nodes):
1076 cl = self.changelog
1076 cl = self.changelog
1077 nm = cl.nodemap
1077 nm = cl.nodemap
1078 filtered = cl.filteredrevs
1078 filtered = cl.filteredrevs
1079 result = []
1079 result = []
1080 for n in nodes:
1080 for n in nodes:
1081 r = nm.get(n)
1081 r = nm.get(n)
1082 resp = not (r is None or r in filtered)
1082 resp = not (r is None or r in filtered)
1083 result.append(resp)
1083 result.append(resp)
1084 return result
1084 return result
1085
1085
    def local(self):
        # returning self (truthy) marks this as a local repository; remote
        # peer classes presumably return a falsy value here -- see cancopy()
        # and statichttprepo, which overrides this. TODO confirm peer contract.
        return self
1088
1088
1089 def publishing(self):
1089 def publishing(self):
1090 # it's safe (and desirable) to trust the publish flag unconditionally
1090 # it's safe (and desirable) to trust the publish flag unconditionally
1091 # so that we don't finalize changes shared between users via ssh or nfs
1091 # so that we don't finalize changes shared between users via ssh or nfs
1092 return self.ui.configbool('phases', 'publish', untrusted=True)
1092 return self.ui.configbool('phases', 'publish', untrusted=True)
1093
1093
1094 def cancopy(self):
1094 def cancopy(self):
1095 # so statichttprepo's override of local() works
1095 # so statichttprepo's override of local() works
1096 if not self.local():
1096 if not self.local():
1097 return False
1097 return False
1098 if not self.publishing():
1098 if not self.publishing():
1099 return True
1099 return True
1100 # if publishing we can't copy if there is filtered content
1100 # if publishing we can't copy if there is filtered content
1101 return not self.filtered('visible').changelog.filteredrevs
1101 return not self.filtered('visible').changelog.filteredrevs
1102
1102
1103 def shared(self):
1103 def shared(self):
1104 '''the type of shared repository (None if not shared)'''
1104 '''the type of shared repository (None if not shared)'''
1105 if self.sharedpath != self.path:
1105 if self.sharedpath != self.path:
1106 return 'store'
1106 return 'store'
1107 return None
1107 return None
1108
1108
1109 def wjoin(self, f, *insidef):
1109 def wjoin(self, f, *insidef):
1110 return self.vfs.reljoin(self.root, f, *insidef)
1110 return self.vfs.reljoin(self.root, f, *insidef)
1111
1111
    def file(self, f):
        # return the filelog (per-file revision log) for tracked file *f*;
        # a single leading '/' is stripped so store paths stay relative.
        # NOTE: f[0] raises IndexError on an empty name -- callers are
        # presumably expected to pass non-empty paths (not verified here).
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)
1116
1116
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents and fix up copy records."""
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                # collapsing to a single parent: drop copy records whose
                # destination and source are both unknown to that parent
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
1132
1132
    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        # thin factory around context.filectx bound to this repository
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)
1138
1138
    def getcwd(self):
        # current working directory, as tracked by the dirstate
        return self.dirstate.getcwd()
1141
1141
    def pathto(self, f, cwd=None):
        # render repo-relative path *f* relative to *cwd* (delegated to
        # the dirstate)
        return self.dirstate.pathto(f, cwd)
1144
1144
    def _loadfilter(self, filter):
        """Compile and cache the filter list for config section *filter*.

        *filter* is 'encode' or 'decode'; each config entry maps a file
        pattern to either a registered data filter name or a shell command.
        Returns a list of (matcher, filterfn, params) tuples.
        """
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # prefer an in-process data filter whose name prefixes the
                # command; the remainder of the command becomes its params
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to piping the data through the shell command
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
1168
1168
1169 def _filter(self, filterpats, filename, data):
1169 def _filter(self, filterpats, filename, data):
1170 for mf, fn, cmd in filterpats:
1170 for mf, fn, cmd in filterpats:
1171 if mf(filename):
1171 if mf(filename):
1172 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1172 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1173 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1173 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1174 break
1174 break
1175
1175
1176 return data
1176 return data
1177
1177
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # compiled [encode] filter patterns, cached on the unfiltered repo
        return self._loadfilter('encode')
1181
1181
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # compiled [decode] filter patterns, cached on the unfiltered repo
        return self._loadfilter('decode')
1185
1185
    def adddatafilter(self, name, filter):
        # register an in-process data filter; _loadfilter prefers these over
        # spawning the configured shell command
        self._datafilters[name] = filter
1188
1188
1189 def wread(self, filename):
1189 def wread(self, filename):
1190 if self.wvfs.islink(filename):
1190 if self.wvfs.islink(filename):
1191 data = self.wvfs.readlink(filename)
1191 data = self.wvfs.readlink(filename)
1192 else:
1192 else:
1193 data = self.wvfs.read(filename)
1193 data = self.wvfs.read(filename)
1194 return self._filter(self._encodefilterpats, filename, data)
1194 return self._filter(self._encodefilterpats, filename, data)
1195
1195
    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        ``flags`` may contain 'l' (symlink) and/or 'x' (executable).
        This returns length of written (maybe decoded) data.
        """
        # apply the 'decode' filters before the data hits the working dir
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            # symlink: the decoded data is the link target
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            # always (re)set the exec bit so stale modes don't linger
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
1212
1212
    def wwritedata(self, filename, data):
        # run *data* through the 'decode' filters for *filename* without
        # writing anything to the working directory
        return self._filter(self._decodefilterpats, filename, data)
1215
1215
1216 def currenttransaction(self):
1216 def currenttransaction(self):
1217 """return the current transaction or None if non exists"""
1217 """return the current transaction or None if non exists"""
1218 if self._transref:
1218 if self._transref:
1219 tr = self._transref()
1219 tr = self._transref()
1220 else:
1220 else:
1221 tr = None
1221 tr = None
1222
1222
1223 if tr and tr.running():
1223 if tr and tr.running():
1224 return tr
1224 return tr
1225 return None
1225 return None
1226
1226
    def transaction(self, desc, report=None):
        """Open a new transaction named *desc* (or nest into a running one).

        *report* optionally overrides the function used to print journal
        messages (defaults to ui.warn). The caller must already hold the
        repository lock. Returns the transaction object.
        """
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # build a unique-per-invocation transaction id for the hooks
        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movement from a code perspective. So we fallback to a
        # tracking at the repository level. One could envision to track changes
        # to the '.hgtags' file through changegroup apply but that fails to
        # cope with case where transaction expose new heads without changegroup
        # being involved (eg: phase movement).
        #
        # For now, We gate the feature behind a flag since this likely comes
        # with performance impacts. The current code run more often than needed
        # and do not use caches as much as it could. The current focus is on
        # the behavior of the feature so we disable it by default. The flag
        # will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tag were touched
        # in this one. Changes are recorded in a line base format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follow:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once buiding set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we needs it to still exist on the transaction
                        # is close (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hooks since the 'tr.hookargs'
            # dict is copied before these run. In addition we needs the data
            # available to in memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['revs'] = xrange(0, 0)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
1435
1435
1436 def _journalfiles(self):
1436 def _journalfiles(self):
1437 return ((self.svfs, 'journal'),
1437 return ((self.svfs, 'journal'),
1438 (self.vfs, 'journal.dirstate'),
1438 (self.vfs, 'journal.dirstate'),
1439 (self.vfs, 'journal.branch'),
1439 (self.vfs, 'journal.branch'),
1440 (self.vfs, 'journal.desc'),
1440 (self.vfs, 'journal.desc'),
1441 (self.vfs, 'journal.bookmarks'),
1441 (self.vfs, 'journal.bookmarks'),
1442 (self.svfs, 'journal.phaseroots'))
1442 (self.svfs, 'journal.phaseroots'))
1443
1443
1444 def undofiles(self):
1444 def undofiles(self):
1445 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1445 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1446
1446
1447 @unfilteredmethod
1447 @unfilteredmethod
1448 def _writejournal(self, desc):
1448 def _writejournal(self, desc):
1449 self.dirstate.savebackup(None, 'journal.dirstate')
1449 self.dirstate.savebackup(None, 'journal.dirstate')
1450 self.vfs.write("journal.branch",
1450 self.vfs.write("journal.branch",
1451 encoding.fromlocal(self.dirstate.branch()))
1451 encoding.fromlocal(self.dirstate.branch()))
1452 self.vfs.write("journal.desc",
1452 self.vfs.write("journal.desc",
1453 "%d\n%s\n" % (len(self), desc))
1453 "%d\n%s\n" % (len(self), desc))
1454 self.vfs.write("journal.bookmarks",
1454 self.vfs.write("journal.bookmarks",
1455 self.vfs.tryread("bookmarks"))
1455 self.vfs.tryread("bookmarks"))
1456 self.svfs.write("journal.phaseroots",
1456 self.svfs.write("journal.phaseroots",
1457 self.svfs.tryread("phaseroots"))
1457 self.svfs.tryread("phaseroots"))
1458
1458
1459 def recover(self):
1459 def recover(self):
1460 with self.lock():
1460 with self.lock():
1461 if self.svfs.exists("journal"):
1461 if self.svfs.exists("journal"):
1462 self.ui.status(_("rolling back interrupted transaction\n"))
1462 self.ui.status(_("rolling back interrupted transaction\n"))
1463 vfsmap = {'': self.svfs,
1463 vfsmap = {'': self.svfs,
1464 'plain': self.vfs,}
1464 'plain': self.vfs,}
1465 transaction.rollback(self.svfs, vfsmap, "journal",
1465 transaction.rollback(self.svfs, vfsmap, "journal",
1466 self.ui.warn,
1466 self.ui.warn,
1467 checkambigfiles=_cachedfiles)
1467 checkambigfiles=_cachedfiles)
1468 self.invalidate()
1468 self.invalidate()
1469 return True
1469 return True
1470 else:
1470 else:
1471 self.ui.warn(_("no interrupted transaction available\n"))
1471 self.ui.warn(_("no interrupted transaction available\n"))
1472 return False
1472 return False
1473
1473
1474 def rollback(self, dryrun=False, force=False):
1474 def rollback(self, dryrun=False, force=False):
1475 wlock = lock = dsguard = None
1475 wlock = lock = dsguard = None
1476 try:
1476 try:
1477 wlock = self.wlock()
1477 wlock = self.wlock()
1478 lock = self.lock()
1478 lock = self.lock()
1479 if self.svfs.exists("undo"):
1479 if self.svfs.exists("undo"):
1480 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1480 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1481
1481
1482 return self._rollback(dryrun, force, dsguard)
1482 return self._rollback(dryrun, force, dsguard)
1483 else:
1483 else:
1484 self.ui.warn(_("no rollback information available\n"))
1484 self.ui.warn(_("no rollback information available\n"))
1485 return 1
1485 return 1
1486 finally:
1486 finally:
1487 release(dsguard, lock, wlock)
1487 release(dsguard, lock, wlock)
1488
1488
1489 @unfilteredmethod # Until we get smarter cache management
1489 @unfilteredmethod # Until we get smarter cache management
1490 def _rollback(self, dryrun, force, dsguard):
1490 def _rollback(self, dryrun, force, dsguard):
1491 ui = self.ui
1491 ui = self.ui
1492 try:
1492 try:
1493 args = self.vfs.read('undo.desc').splitlines()
1493 args = self.vfs.read('undo.desc').splitlines()
1494 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1494 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1495 if len(args) >= 3:
1495 if len(args) >= 3:
1496 detail = args[2]
1496 detail = args[2]
1497 oldtip = oldlen - 1
1497 oldtip = oldlen - 1
1498
1498
1499 if detail and ui.verbose:
1499 if detail and ui.verbose:
1500 msg = (_('repository tip rolled back to revision %d'
1500 msg = (_('repository tip rolled back to revision %d'
1501 ' (undo %s: %s)\n')
1501 ' (undo %s: %s)\n')
1502 % (oldtip, desc, detail))
1502 % (oldtip, desc, detail))
1503 else:
1503 else:
1504 msg = (_('repository tip rolled back to revision %d'
1504 msg = (_('repository tip rolled back to revision %d'
1505 ' (undo %s)\n')
1505 ' (undo %s)\n')
1506 % (oldtip, desc))
1506 % (oldtip, desc))
1507 except IOError:
1507 except IOError:
1508 msg = _('rolling back unknown transaction\n')
1508 msg = _('rolling back unknown transaction\n')
1509 desc = None
1509 desc = None
1510
1510
1511 if not force and self['.'] != self['tip'] and desc == 'commit':
1511 if not force and self['.'] != self['tip'] and desc == 'commit':
1512 raise error.Abort(
1512 raise error.Abort(
1513 _('rollback of last commit while not checked out '
1513 _('rollback of last commit while not checked out '
1514 'may lose data'), hint=_('use -f to force'))
1514 'may lose data'), hint=_('use -f to force'))
1515
1515
1516 ui.status(msg)
1516 ui.status(msg)
1517 if dryrun:
1517 if dryrun:
1518 return 0
1518 return 0
1519
1519
1520 parents = self.dirstate.parents()
1520 parents = self.dirstate.parents()
1521 self.destroying()
1521 self.destroying()
1522 vfsmap = {'plain': self.vfs, '': self.svfs}
1522 vfsmap = {'plain': self.vfs, '': self.svfs}
1523 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1523 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1524 checkambigfiles=_cachedfiles)
1524 checkambigfiles=_cachedfiles)
1525 if self.vfs.exists('undo.bookmarks'):
1525 if self.vfs.exists('undo.bookmarks'):
1526 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1526 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1527 if self.svfs.exists('undo.phaseroots'):
1527 if self.svfs.exists('undo.phaseroots'):
1528 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1528 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1529 self.invalidate()
1529 self.invalidate()
1530
1530
1531 parentgone = (parents[0] not in self.changelog.nodemap or
1531 parentgone = (parents[0] not in self.changelog.nodemap or
1532 parents[1] not in self.changelog.nodemap)
1532 parents[1] not in self.changelog.nodemap)
1533 if parentgone:
1533 if parentgone:
1534 # prevent dirstateguard from overwriting already restored one
1534 # prevent dirstateguard from overwriting already restored one
1535 dsguard.close()
1535 dsguard.close()
1536
1536
1537 self.dirstate.restorebackup(None, 'undo.dirstate')
1537 self.dirstate.restorebackup(None, 'undo.dirstate')
1538 try:
1538 try:
1539 branch = self.vfs.read('undo.branch')
1539 branch = self.vfs.read('undo.branch')
1540 self.dirstate.setbranch(encoding.tolocal(branch))
1540 self.dirstate.setbranch(encoding.tolocal(branch))
1541 except IOError:
1541 except IOError:
1542 ui.warn(_('named branch could not be reset: '
1542 ui.warn(_('named branch could not be reset: '
1543 'current branch is still \'%s\'\n')
1543 'current branch is still \'%s\'\n')
1544 % self.dirstate.branch())
1544 % self.dirstate.branch())
1545
1545
1546 parents = tuple([p.rev() for p in self[None].parents()])
1546 parents = tuple([p.rev() for p in self[None].parents()])
1547 if len(parents) > 1:
1547 if len(parents) > 1:
1548 ui.status(_('working directory now based on '
1548 ui.status(_('working directory now based on '
1549 'revisions %d and %d\n') % parents)
1549 'revisions %d and %d\n') % parents)
1550 else:
1550 else:
1551 ui.status(_('working directory now based on '
1551 ui.status(_('working directory now based on '
1552 'revision %d\n') % parents)
1552 'revision %d\n') % parents)
1553 mergemod.mergestate.clean(self, self['.'].node())
1553 mergemod.mergestate.clean(self, self['.'].node())
1554
1554
1555 # TODO: if we know which new heads may result from this rollback, pass
1555 # TODO: if we know which new heads may result from this rollback, pass
1556 # them to destroy(), which will prevent the branchhead cache from being
1556 # them to destroy(), which will prevent the branchhead cache from being
1557 # invalidated.
1557 # invalidated.
1558 self.destroyed()
1558 self.destroyed()
1559 return 0
1559 return 0
1560
1560
1561 def _buildcacheupdater(self, newtransaction):
1561 def _buildcacheupdater(self, newtransaction):
1562 """called during transaction to build the callback updating cache
1562 """called during transaction to build the callback updating cache
1563
1563
1564 Lives on the repository to help extension who might want to augment
1564 Lives on the repository to help extension who might want to augment
1565 this logic. For this purpose, the created transaction is passed to the
1565 this logic. For this purpose, the created transaction is passed to the
1566 method.
1566 method.
1567 """
1567 """
1568 # we must avoid cyclic reference between repo and transaction.
1568 # we must avoid cyclic reference between repo and transaction.
1569 reporef = weakref.ref(self)
1569 reporef = weakref.ref(self)
1570 def updater(tr):
1570 def updater(tr):
1571 repo = reporef()
1571 repo = reporef()
1572 repo.updatecaches(tr)
1572 repo.updatecaches(tr)
1573 return updater
1573 return updater
1574
1574
1575 @unfilteredmethod
1575 @unfilteredmethod
1576 def updatecaches(self, tr=None, full=False):
1576 def updatecaches(self, tr=None, full=False):
1577 """warm appropriate caches
1577 """warm appropriate caches
1578
1578
1579 If this function is called after a transaction closed. The transaction
1579 If this function is called after a transaction closed. The transaction
1580 will be available in the 'tr' argument. This can be used to selectively
1580 will be available in the 'tr' argument. This can be used to selectively
1581 update caches relevant to the changes in that transaction.
1581 update caches relevant to the changes in that transaction.
1582
1582
1583 If 'full' is set, make sure all caches the function knows about have
1583 If 'full' is set, make sure all caches the function knows about have
1584 up-to-date data. Even the ones usually loaded more lazily.
1584 up-to-date data. Even the ones usually loaded more lazily.
1585 """
1585 """
1586 if tr is not None and tr.hookargs.get('source') == 'strip':
1586 if tr is not None and tr.hookargs.get('source') == 'strip':
1587 # During strip, many caches are invalid but
1587 # During strip, many caches are invalid but
1588 # later call to `destroyed` will refresh them.
1588 # later call to `destroyed` will refresh them.
1589 return
1589 return
1590
1590
1591 if tr is None or tr.changes['revs']:
1591 if tr is None or tr.changes['revs']:
1592 # updating the unfiltered branchmap should refresh all the others,
1592 # updating the unfiltered branchmap should refresh all the others,
1593 self.ui.debug('updating the branch cache\n')
1593 self.ui.debug('updating the branch cache\n')
1594 branchmap.updatecache(self.filtered('served'))
1594 branchmap.updatecache(self.filtered('served'))
1595
1595
1596 if full:
1596 if full:
1597 rbc = self.revbranchcache()
1597 rbc = self.revbranchcache()
1598 for r in self.changelog:
1598 for r in self.changelog:
1599 rbc.branchinfo(r)
1599 rbc.branchinfo(r)
1600 rbc.write()
1600 rbc.write()
1601
1601
1602 def invalidatecaches(self):
1602 def invalidatecaches(self):
1603
1603
1604 if '_tagscache' in vars(self):
1604 if '_tagscache' in vars(self):
1605 # can't use delattr on proxy
1605 # can't use delattr on proxy
1606 del self.__dict__['_tagscache']
1606 del self.__dict__['_tagscache']
1607
1607
1608 self.unfiltered()._branchcaches.clear()
1608 self.unfiltered()._branchcaches.clear()
1609 self.invalidatevolatilesets()
1609 self.invalidatevolatilesets()
1610 self._sparsesignaturecache.clear()
1610 self._sparsesignaturecache.clear()
1611
1611
1612 def invalidatevolatilesets(self):
1612 def invalidatevolatilesets(self):
1613 self.filteredrevcache.clear()
1613 self.filteredrevcache.clear()
1614 obsolete.clearobscaches(self)
1614 obsolete.clearobscaches(self)
1615
1615
1616 def invalidatedirstate(self):
1616 def invalidatedirstate(self):
1617 '''Invalidates the dirstate, causing the next call to dirstate
1617 '''Invalidates the dirstate, causing the next call to dirstate
1618 to check if it was modified since the last time it was read,
1618 to check if it was modified since the last time it was read,
1619 rereading it if it has.
1619 rereading it if it has.
1620
1620
1621 This is different to dirstate.invalidate() that it doesn't always
1621 This is different to dirstate.invalidate() that it doesn't always
1622 rereads the dirstate. Use dirstate.invalidate() if you want to
1622 rereads the dirstate. Use dirstate.invalidate() if you want to
1623 explicitly read the dirstate again (i.e. restoring it to a previous
1623 explicitly read the dirstate again (i.e. restoring it to a previous
1624 known good state).'''
1624 known good state).'''
1625 if hasunfilteredcache(self, 'dirstate'):
1625 if hasunfilteredcache(self, 'dirstate'):
1626 for k in self.dirstate._filecache:
1626 for k in self.dirstate._filecache:
1627 try:
1627 try:
1628 delattr(self.dirstate, k)
1628 delattr(self.dirstate, k)
1629 except AttributeError:
1629 except AttributeError:
1630 pass
1630 pass
1631 delattr(self.unfiltered(), 'dirstate')
1631 delattr(self.unfiltered(), 'dirstate')
1632
1632
1633 def invalidate(self, clearfilecache=False):
1633 def invalidate(self, clearfilecache=False):
1634 '''Invalidates both store and non-store parts other than dirstate
1634 '''Invalidates both store and non-store parts other than dirstate
1635
1635
1636 If a transaction is running, invalidation of store is omitted,
1636 If a transaction is running, invalidation of store is omitted,
1637 because discarding in-memory changes might cause inconsistency
1637 because discarding in-memory changes might cause inconsistency
1638 (e.g. incomplete fncache causes unintentional failure, but
1638 (e.g. incomplete fncache causes unintentional failure, but
1639 redundant one doesn't).
1639 redundant one doesn't).
1640 '''
1640 '''
1641 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1641 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1642 for k in list(self._filecache.keys()):
1642 for k in list(self._filecache.keys()):
1643 # dirstate is invalidated separately in invalidatedirstate()
1643 # dirstate is invalidated separately in invalidatedirstate()
1644 if k == 'dirstate':
1644 if k == 'dirstate':
1645 continue
1645 continue
1646 if (k == 'changelog' and
1646 if (k == 'changelog' and
1647 self.currenttransaction() and
1647 self.currenttransaction() and
1648 self.changelog._delayed):
1648 self.changelog._delayed):
1649 # The changelog object may store unwritten revisions. We don't
1649 # The changelog object may store unwritten revisions. We don't
1650 # want to lose them.
1650 # want to lose them.
1651 # TODO: Solve the problem instead of working around it.
1651 # TODO: Solve the problem instead of working around it.
1652 continue
1652 continue
1653
1653
1654 if clearfilecache:
1654 if clearfilecache:
1655 del self._filecache[k]
1655 del self._filecache[k]
1656 try:
1656 try:
1657 delattr(unfiltered, k)
1657 delattr(unfiltered, k)
1658 except AttributeError:
1658 except AttributeError:
1659 pass
1659 pass
1660 self.invalidatecaches()
1660 self.invalidatecaches()
1661 if not self.currenttransaction():
1661 if not self.currenttransaction():
1662 # TODO: Changing contents of store outside transaction
1662 # TODO: Changing contents of store outside transaction
1663 # causes inconsistency. We should make in-memory store
1663 # causes inconsistency. We should make in-memory store
1664 # changes detectable, and abort if changed.
1664 # changes detectable, and abort if changed.
1665 self.store.invalidatecaches()
1665 self.store.invalidatecaches()
1666
1666
1667 def invalidateall(self):
1667 def invalidateall(self):
1668 '''Fully invalidates both store and non-store parts, causing the
1668 '''Fully invalidates both store and non-store parts, causing the
1669 subsequent operation to reread any outside changes.'''
1669 subsequent operation to reread any outside changes.'''
1670 # extension should hook this to invalidate its caches
1670 # extension should hook this to invalidate its caches
1671 self.invalidate()
1671 self.invalidate()
1672 self.invalidatedirstate()
1672 self.invalidatedirstate()
1673
1673
1674 @unfilteredmethod
1674 @unfilteredmethod
1675 def _refreshfilecachestats(self, tr):
1675 def _refreshfilecachestats(self, tr):
1676 """Reload stats of cached files so that they are flagged as valid"""
1676 """Reload stats of cached files so that they are flagged as valid"""
1677 for k, ce in self._filecache.items():
1677 for k, ce in self._filecache.items():
1678 k = pycompat.sysstr(k)
1678 k = pycompat.sysstr(k)
1679 if k == r'dirstate' or k not in self.__dict__:
1679 if k == r'dirstate' or k not in self.__dict__:
1680 continue
1680 continue
1681 ce.refresh()
1681 ce.refresh()
1682
1682
1683 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1683 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1684 inheritchecker=None, parentenvvar=None):
1684 inheritchecker=None, parentenvvar=None):
1685 parentlock = None
1685 parentlock = None
1686 # the contents of parentenvvar are used by the underlying lock to
1686 # the contents of parentenvvar are used by the underlying lock to
1687 # determine whether it can be inherited
1687 # determine whether it can be inherited
1688 if parentenvvar is not None:
1688 if parentenvvar is not None:
1689 parentlock = encoding.environ.get(parentenvvar)
1689 parentlock = encoding.environ.get(parentenvvar)
1690
1690
1691 timeout = 0
1691 timeout = 0
1692 warntimeout = 0
1692 warntimeout = 0
1693 if wait:
1693 if wait:
1694 timeout = self.ui.configint("ui", "timeout")
1694 timeout = self.ui.configint("ui", "timeout")
1695 warntimeout = self.ui.configint("ui", "timeout.warn")
1695 warntimeout = self.ui.configint("ui", "timeout.warn")
1696 # internal config: ui.signal-safe-lock
1697 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
1696
1698
1697 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1699 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1698 releasefn=releasefn,
1700 releasefn=releasefn,
1699 acquirefn=acquirefn, desc=desc,
1701 acquirefn=acquirefn, desc=desc,
1700 inheritchecker=inheritchecker,
1702 inheritchecker=inheritchecker,
1701 parentlock=parentlock)
1703 parentlock=parentlock,
1704 signalsafe=signalsafe)
1702 return l
1705 return l
1703
1706
1704 def _afterlock(self, callback):
1707 def _afterlock(self, callback):
1705 """add a callback to be run when the repository is fully unlocked
1708 """add a callback to be run when the repository is fully unlocked
1706
1709
1707 The callback will be executed when the outermost lock is released
1710 The callback will be executed when the outermost lock is released
1708 (with wlock being higher level than 'lock')."""
1711 (with wlock being higher level than 'lock')."""
1709 for ref in (self._wlockref, self._lockref):
1712 for ref in (self._wlockref, self._lockref):
1710 l = ref and ref()
1713 l = ref and ref()
1711 if l and l.held:
1714 if l and l.held:
1712 l.postrelease.append(callback)
1715 l.postrelease.append(callback)
1713 break
1716 break
1714 else: # no lock have been found.
1717 else: # no lock have been found.
1715 callback()
1718 callback()
1716
1719
1717 def lock(self, wait=True):
1720 def lock(self, wait=True):
1718 '''Lock the repository store (.hg/store) and return a weak reference
1721 '''Lock the repository store (.hg/store) and return a weak reference
1719 to the lock. Use this before modifying the store (e.g. committing or
1722 to the lock. Use this before modifying the store (e.g. committing or
1720 stripping). If you are opening a transaction, get a lock as well.)
1723 stripping). If you are opening a transaction, get a lock as well.)
1721
1724
1722 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1725 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1723 'wlock' first to avoid a dead-lock hazard.'''
1726 'wlock' first to avoid a dead-lock hazard.'''
1724 l = self._currentlock(self._lockref)
1727 l = self._currentlock(self._lockref)
1725 if l is not None:
1728 if l is not None:
1726 l.lock()
1729 l.lock()
1727 return l
1730 return l
1728
1731
1729 l = self._lock(self.svfs, "lock", wait, None,
1732 l = self._lock(self.svfs, "lock", wait, None,
1730 self.invalidate, _('repository %s') % self.origroot)
1733 self.invalidate, _('repository %s') % self.origroot)
1731 self._lockref = weakref.ref(l)
1734 self._lockref = weakref.ref(l)
1732 return l
1735 return l
1733
1736
1734 def _wlockchecktransaction(self):
1737 def _wlockchecktransaction(self):
1735 if self.currenttransaction() is not None:
1738 if self.currenttransaction() is not None:
1736 raise error.LockInheritanceContractViolation(
1739 raise error.LockInheritanceContractViolation(
1737 'wlock cannot be inherited in the middle of a transaction')
1740 'wlock cannot be inherited in the middle of a transaction')
1738
1741
1739 def wlock(self, wait=True):
1742 def wlock(self, wait=True):
1740 '''Lock the non-store parts of the repository (everything under
1743 '''Lock the non-store parts of the repository (everything under
1741 .hg except .hg/store) and return a weak reference to the lock.
1744 .hg except .hg/store) and return a weak reference to the lock.
1742
1745
1743 Use this before modifying files in .hg.
1746 Use this before modifying files in .hg.
1744
1747
1745 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1748 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1746 'wlock' first to avoid a dead-lock hazard.'''
1749 'wlock' first to avoid a dead-lock hazard.'''
1747 l = self._wlockref and self._wlockref()
1750 l = self._wlockref and self._wlockref()
1748 if l is not None and l.held:
1751 if l is not None and l.held:
1749 l.lock()
1752 l.lock()
1750 return l
1753 return l
1751
1754
1752 # We do not need to check for non-waiting lock acquisition. Such
1755 # We do not need to check for non-waiting lock acquisition. Such
1753 # acquisition would not cause dead-lock as they would just fail.
1756 # acquisition would not cause dead-lock as they would just fail.
1754 if wait and (self.ui.configbool('devel', 'all-warnings')
1757 if wait and (self.ui.configbool('devel', 'all-warnings')
1755 or self.ui.configbool('devel', 'check-locks')):
1758 or self.ui.configbool('devel', 'check-locks')):
1756 if self._currentlock(self._lockref) is not None:
1759 if self._currentlock(self._lockref) is not None:
1757 self.ui.develwarn('"wlock" acquired after "lock"')
1760 self.ui.develwarn('"wlock" acquired after "lock"')
1758
1761
1759 def unlock():
1762 def unlock():
1760 if self.dirstate.pendingparentchange():
1763 if self.dirstate.pendingparentchange():
1761 self.dirstate.invalidate()
1764 self.dirstate.invalidate()
1762 else:
1765 else:
1763 self.dirstate.write(None)
1766 self.dirstate.write(None)
1764
1767
1765 self._filecache['dirstate'].refresh()
1768 self._filecache['dirstate'].refresh()
1766
1769
1767 l = self._lock(self.vfs, "wlock", wait, unlock,
1770 l = self._lock(self.vfs, "wlock", wait, unlock,
1768 self.invalidatedirstate, _('working directory of %s') %
1771 self.invalidatedirstate, _('working directory of %s') %
1769 self.origroot,
1772 self.origroot,
1770 inheritchecker=self._wlockchecktransaction,
1773 inheritchecker=self._wlockchecktransaction,
1771 parentenvvar='HG_WLOCK_LOCKER')
1774 parentenvvar='HG_WLOCK_LOCKER')
1772 self._wlockref = weakref.ref(l)
1775 self._wlockref = weakref.ref(l)
1773 return l
1776 return l
1774
1777
1775 def _currentlock(self, lockref):
1778 def _currentlock(self, lockref):
1776 """Returns the lock if it's held, or None if it's not."""
1779 """Returns the lock if it's held, or None if it's not."""
1777 if lockref is None:
1780 if lockref is None:
1778 return None
1781 return None
1779 l = lockref()
1782 l = lockref()
1780 if l is None or not l.held:
1783 if l is None or not l.held:
1781 return None
1784 return None
1782 return l
1785 return l
1783
1786
1784 def currentwlock(self):
1787 def currentwlock(self):
1785 """Returns the wlock if it's held, or None if it's not."""
1788 """Returns the wlock if it's held, or None if it's not."""
1786 return self._currentlock(self._wlockref)
1789 return self._currentlock(self._wlockref)
1787
1790
1788 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1791 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1789 """
1792 """
1790 commit an individual file as part of a larger transaction
1793 commit an individual file as part of a larger transaction
1791 """
1794 """
1792
1795
1793 fname = fctx.path()
1796 fname = fctx.path()
1794 fparent1 = manifest1.get(fname, nullid)
1797 fparent1 = manifest1.get(fname, nullid)
1795 fparent2 = manifest2.get(fname, nullid)
1798 fparent2 = manifest2.get(fname, nullid)
1796 if isinstance(fctx, context.filectx):
1799 if isinstance(fctx, context.filectx):
1797 node = fctx.filenode()
1800 node = fctx.filenode()
1798 if node in [fparent1, fparent2]:
1801 if node in [fparent1, fparent2]:
1799 self.ui.debug('reusing %s filelog entry\n' % fname)
1802 self.ui.debug('reusing %s filelog entry\n' % fname)
1800 if manifest1.flags(fname) != fctx.flags():
1803 if manifest1.flags(fname) != fctx.flags():
1801 changelist.append(fname)
1804 changelist.append(fname)
1802 return node
1805 return node
1803
1806
1804 flog = self.file(fname)
1807 flog = self.file(fname)
1805 meta = {}
1808 meta = {}
1806 copy = fctx.renamed()
1809 copy = fctx.renamed()
1807 if copy and copy[0] != fname:
1810 if copy and copy[0] != fname:
1808 # Mark the new revision of this file as a copy of another
1811 # Mark the new revision of this file as a copy of another
1809 # file. This copy data will effectively act as a parent
1812 # file. This copy data will effectively act as a parent
1810 # of this new revision. If this is a merge, the first
1813 # of this new revision. If this is a merge, the first
1811 # parent will be the nullid (meaning "look up the copy data")
1814 # parent will be the nullid (meaning "look up the copy data")
1812 # and the second one will be the other parent. For example:
1815 # and the second one will be the other parent. For example:
1813 #
1816 #
1814 # 0 --- 1 --- 3 rev1 changes file foo
1817 # 0 --- 1 --- 3 rev1 changes file foo
1815 # \ / rev2 renames foo to bar and changes it
1818 # \ / rev2 renames foo to bar and changes it
1816 # \- 2 -/ rev3 should have bar with all changes and
1819 # \- 2 -/ rev3 should have bar with all changes and
1817 # should record that bar descends from
1820 # should record that bar descends from
1818 # bar in rev2 and foo in rev1
1821 # bar in rev2 and foo in rev1
1819 #
1822 #
1820 # this allows this merge to succeed:
1823 # this allows this merge to succeed:
1821 #
1824 #
1822 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1825 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1823 # \ / merging rev3 and rev4 should use bar@rev2
1826 # \ / merging rev3 and rev4 should use bar@rev2
1824 # \- 2 --- 4 as the merge base
1827 # \- 2 --- 4 as the merge base
1825 #
1828 #
1826
1829
1827 cfname = copy[0]
1830 cfname = copy[0]
1828 crev = manifest1.get(cfname)
1831 crev = manifest1.get(cfname)
1829 newfparent = fparent2
1832 newfparent = fparent2
1830
1833
1831 if manifest2: # branch merge
1834 if manifest2: # branch merge
1832 if fparent2 == nullid or crev is None: # copied on remote side
1835 if fparent2 == nullid or crev is None: # copied on remote side
1833 if cfname in manifest2:
1836 if cfname in manifest2:
1834 crev = manifest2[cfname]
1837 crev = manifest2[cfname]
1835 newfparent = fparent1
1838 newfparent = fparent1
1836
1839
1837 # Here, we used to search backwards through history to try to find
1840 # Here, we used to search backwards through history to try to find
1838 # where the file copy came from if the source of a copy was not in
1841 # where the file copy came from if the source of a copy was not in
1839 # the parent directory. However, this doesn't actually make sense to
1842 # the parent directory. However, this doesn't actually make sense to
1840 # do (what does a copy from something not in your working copy even
1843 # do (what does a copy from something not in your working copy even
1841 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1844 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1842 # the user that copy information was dropped, so if they didn't
1845 # the user that copy information was dropped, so if they didn't
1843 # expect this outcome it can be fixed, but this is the correct
1846 # expect this outcome it can be fixed, but this is the correct
1844 # behavior in this circumstance.
1847 # behavior in this circumstance.
1845
1848
1846 if crev:
1849 if crev:
1847 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1850 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1848 meta["copy"] = cfname
1851 meta["copy"] = cfname
1849 meta["copyrev"] = hex(crev)
1852 meta["copyrev"] = hex(crev)
1850 fparent1, fparent2 = nullid, newfparent
1853 fparent1, fparent2 = nullid, newfparent
1851 else:
1854 else:
1852 self.ui.warn(_("warning: can't find ancestor for '%s' "
1855 self.ui.warn(_("warning: can't find ancestor for '%s' "
1853 "copied from '%s'!\n") % (fname, cfname))
1856 "copied from '%s'!\n") % (fname, cfname))
1854
1857
1855 elif fparent1 == nullid:
1858 elif fparent1 == nullid:
1856 fparent1, fparent2 = fparent2, nullid
1859 fparent1, fparent2 = fparent2, nullid
1857 elif fparent2 != nullid:
1860 elif fparent2 != nullid:
1858 # is one parent an ancestor of the other?
1861 # is one parent an ancestor of the other?
1859 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1862 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1860 if fparent1 in fparentancestors:
1863 if fparent1 in fparentancestors:
1861 fparent1, fparent2 = fparent2, nullid
1864 fparent1, fparent2 = fparent2, nullid
1862 elif fparent2 in fparentancestors:
1865 elif fparent2 in fparentancestors:
1863 fparent2 = nullid
1866 fparent2 = nullid
1864
1867
1865 # is the file changed?
1868 # is the file changed?
1866 text = fctx.data()
1869 text = fctx.data()
1867 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1870 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1868 changelist.append(fname)
1871 changelist.append(fname)
1869 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1872 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1870 # are just the flags changed during merge?
1873 # are just the flags changed during merge?
1871 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1874 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1872 changelist.append(fname)
1875 changelist.append(fname)
1873
1876
1874 return fparent1
1877 return fparent1
1875
1878
1876 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1879 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1877 """check for commit arguments that aren't committable"""
1880 """check for commit arguments that aren't committable"""
1878 if match.isexact() or match.prefix():
1881 if match.isexact() or match.prefix():
1879 matched = set(status.modified + status.added + status.removed)
1882 matched = set(status.modified + status.added + status.removed)
1880
1883
1881 for f in match.files():
1884 for f in match.files():
1882 f = self.dirstate.normalize(f)
1885 f = self.dirstate.normalize(f)
1883 if f == '.' or f in matched or f in wctx.substate:
1886 if f == '.' or f in matched or f in wctx.substate:
1884 continue
1887 continue
1885 if f in status.deleted:
1888 if f in status.deleted:
1886 fail(f, _('file not found!'))
1889 fail(f, _('file not found!'))
1887 if f in vdirs: # visited directory
1890 if f in vdirs: # visited directory
1888 d = f + '/'
1891 d = f + '/'
1889 for mf in matched:
1892 for mf in matched:
1890 if mf.startswith(d):
1893 if mf.startswith(d):
1891 break
1894 break
1892 else:
1895 else:
1893 fail(f, _("no match under directory!"))
1896 fail(f, _("no match under directory!"))
1894 elif f not in self.dirstate:
1897 elif f not in self.dirstate:
1895 fail(f, _("file not tracked!"))
1898 fail(f, _("file not tracked!"))
1896
1899
1897 @unfilteredmethod
1900 @unfilteredmethod
1898 def commit(self, text="", user=None, date=None, match=None, force=False,
1901 def commit(self, text="", user=None, date=None, match=None, force=False,
1899 editor=False, extra=None):
1902 editor=False, extra=None):
1900 """Add a new revision to current repository.
1903 """Add a new revision to current repository.
1901
1904
1902 Revision information is gathered from the working directory,
1905 Revision information is gathered from the working directory,
1903 match can be used to filter the committed files. If editor is
1906 match can be used to filter the committed files. If editor is
1904 supplied, it is called to get a commit message.
1907 supplied, it is called to get a commit message.
1905 """
1908 """
1906 if extra is None:
1909 if extra is None:
1907 extra = {}
1910 extra = {}
1908
1911
1909 def fail(f, msg):
1912 def fail(f, msg):
1910 raise error.Abort('%s: %s' % (f, msg))
1913 raise error.Abort('%s: %s' % (f, msg))
1911
1914
1912 if not match:
1915 if not match:
1913 match = matchmod.always(self.root, '')
1916 match = matchmod.always(self.root, '')
1914
1917
1915 if not force:
1918 if not force:
1916 vdirs = []
1919 vdirs = []
1917 match.explicitdir = vdirs.append
1920 match.explicitdir = vdirs.append
1918 match.bad = fail
1921 match.bad = fail
1919
1922
1920 wlock = lock = tr = None
1923 wlock = lock = tr = None
1921 try:
1924 try:
1922 wlock = self.wlock()
1925 wlock = self.wlock()
1923 lock = self.lock() # for recent changelog (see issue4368)
1926 lock = self.lock() # for recent changelog (see issue4368)
1924
1927
1925 wctx = self[None]
1928 wctx = self[None]
1926 merge = len(wctx.parents()) > 1
1929 merge = len(wctx.parents()) > 1
1927
1930
1928 if not force and merge and not match.always():
1931 if not force and merge and not match.always():
1929 raise error.Abort(_('cannot partially commit a merge '
1932 raise error.Abort(_('cannot partially commit a merge '
1930 '(do not specify files or patterns)'))
1933 '(do not specify files or patterns)'))
1931
1934
1932 status = self.status(match=match, clean=force)
1935 status = self.status(match=match, clean=force)
1933 if force:
1936 if force:
1934 status.modified.extend(status.clean) # mq may commit clean files
1937 status.modified.extend(status.clean) # mq may commit clean files
1935
1938
1936 # check subrepos
1939 # check subrepos
1937 subs, commitsubs, newstate = subrepoutil.precommit(
1940 subs, commitsubs, newstate = subrepoutil.precommit(
1938 self.ui, wctx, status, match, force=force)
1941 self.ui, wctx, status, match, force=force)
1939
1942
1940 # make sure all explicit patterns are matched
1943 # make sure all explicit patterns are matched
1941 if not force:
1944 if not force:
1942 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1945 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1943
1946
1944 cctx = context.workingcommitctx(self, status,
1947 cctx = context.workingcommitctx(self, status,
1945 text, user, date, extra)
1948 text, user, date, extra)
1946
1949
1947 # internal config: ui.allowemptycommit
1950 # internal config: ui.allowemptycommit
1948 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1951 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1949 or extra.get('close') or merge or cctx.files()
1952 or extra.get('close') or merge or cctx.files()
1950 or self.ui.configbool('ui', 'allowemptycommit'))
1953 or self.ui.configbool('ui', 'allowemptycommit'))
1951 if not allowemptycommit:
1954 if not allowemptycommit:
1952 return None
1955 return None
1953
1956
1954 if merge and cctx.deleted():
1957 if merge and cctx.deleted():
1955 raise error.Abort(_("cannot commit merge with missing files"))
1958 raise error.Abort(_("cannot commit merge with missing files"))
1956
1959
1957 ms = mergemod.mergestate.read(self)
1960 ms = mergemod.mergestate.read(self)
1958 mergeutil.checkunresolved(ms)
1961 mergeutil.checkunresolved(ms)
1959
1962
1960 if editor:
1963 if editor:
1961 cctx._text = editor(self, cctx, subs)
1964 cctx._text = editor(self, cctx, subs)
1962 edited = (text != cctx._text)
1965 edited = (text != cctx._text)
1963
1966
1964 # Save commit message in case this transaction gets rolled back
1967 # Save commit message in case this transaction gets rolled back
1965 # (e.g. by a pretxncommit hook). Leave the content alone on
1968 # (e.g. by a pretxncommit hook). Leave the content alone on
1966 # the assumption that the user will use the same editor again.
1969 # the assumption that the user will use the same editor again.
1967 msgfn = self.savecommitmessage(cctx._text)
1970 msgfn = self.savecommitmessage(cctx._text)
1968
1971
1969 # commit subs and write new state
1972 # commit subs and write new state
1970 if subs:
1973 if subs:
1971 for s in sorted(commitsubs):
1974 for s in sorted(commitsubs):
1972 sub = wctx.sub(s)
1975 sub = wctx.sub(s)
1973 self.ui.status(_('committing subrepository %s\n') %
1976 self.ui.status(_('committing subrepository %s\n') %
1974 subrepoutil.subrelpath(sub))
1977 subrepoutil.subrelpath(sub))
1975 sr = sub.commit(cctx._text, user, date)
1978 sr = sub.commit(cctx._text, user, date)
1976 newstate[s] = (newstate[s][0], sr)
1979 newstate[s] = (newstate[s][0], sr)
1977 subrepoutil.writestate(self, newstate)
1980 subrepoutil.writestate(self, newstate)
1978
1981
1979 p1, p2 = self.dirstate.parents()
1982 p1, p2 = self.dirstate.parents()
1980 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1983 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1981 try:
1984 try:
1982 self.hook("precommit", throw=True, parent1=hookp1,
1985 self.hook("precommit", throw=True, parent1=hookp1,
1983 parent2=hookp2)
1986 parent2=hookp2)
1984 tr = self.transaction('commit')
1987 tr = self.transaction('commit')
1985 ret = self.commitctx(cctx, True)
1988 ret = self.commitctx(cctx, True)
1986 except: # re-raises
1989 except: # re-raises
1987 if edited:
1990 if edited:
1988 self.ui.write(
1991 self.ui.write(
1989 _('note: commit message saved in %s\n') % msgfn)
1992 _('note: commit message saved in %s\n') % msgfn)
1990 raise
1993 raise
1991 # update bookmarks, dirstate and mergestate
1994 # update bookmarks, dirstate and mergestate
1992 bookmarks.update(self, [p1, p2], ret)
1995 bookmarks.update(self, [p1, p2], ret)
1993 cctx.markcommitted(ret)
1996 cctx.markcommitted(ret)
1994 ms.reset()
1997 ms.reset()
1995 tr.close()
1998 tr.close()
1996
1999
1997 finally:
2000 finally:
1998 lockmod.release(tr, lock, wlock)
2001 lockmod.release(tr, lock, wlock)
1999
2002
2000 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2003 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2001 # hack for command that use a temporary commit (eg: histedit)
2004 # hack for command that use a temporary commit (eg: histedit)
2002 # temporary commit got stripped before hook release
2005 # temporary commit got stripped before hook release
2003 if self.changelog.hasnode(ret):
2006 if self.changelog.hasnode(ret):
2004 self.hook("commit", node=node, parent1=parent1,
2007 self.hook("commit", node=node, parent1=parent1,
2005 parent2=parent2)
2008 parent2=parent2)
2006 self._afterlock(commithook)
2009 self._afterlock(commithook)
2007 return ret
2010 return ret
2008
2011
2009 @unfilteredmethod
2012 @unfilteredmethod
2010 def commitctx(self, ctx, error=False):
2013 def commitctx(self, ctx, error=False):
2011 """Add a new revision to current repository.
2014 """Add a new revision to current repository.
2012 Revision information is passed via the context argument.
2015 Revision information is passed via the context argument.
2013 """
2016 """
2014
2017
2015 tr = None
2018 tr = None
2016 p1, p2 = ctx.p1(), ctx.p2()
2019 p1, p2 = ctx.p1(), ctx.p2()
2017 user = ctx.user()
2020 user = ctx.user()
2018
2021
2019 lock = self.lock()
2022 lock = self.lock()
2020 try:
2023 try:
2021 tr = self.transaction("commit")
2024 tr = self.transaction("commit")
2022 trp = weakref.proxy(tr)
2025 trp = weakref.proxy(tr)
2023
2026
2024 if ctx.manifestnode():
2027 if ctx.manifestnode():
2025 # reuse an existing manifest revision
2028 # reuse an existing manifest revision
2026 mn = ctx.manifestnode()
2029 mn = ctx.manifestnode()
2027 files = ctx.files()
2030 files = ctx.files()
2028 elif ctx.files():
2031 elif ctx.files():
2029 m1ctx = p1.manifestctx()
2032 m1ctx = p1.manifestctx()
2030 m2ctx = p2.manifestctx()
2033 m2ctx = p2.manifestctx()
2031 mctx = m1ctx.copy()
2034 mctx = m1ctx.copy()
2032
2035
2033 m = mctx.read()
2036 m = mctx.read()
2034 m1 = m1ctx.read()
2037 m1 = m1ctx.read()
2035 m2 = m2ctx.read()
2038 m2 = m2ctx.read()
2036
2039
2037 # check in files
2040 # check in files
2038 added = []
2041 added = []
2039 changed = []
2042 changed = []
2040 removed = list(ctx.removed())
2043 removed = list(ctx.removed())
2041 linkrev = len(self)
2044 linkrev = len(self)
2042 self.ui.note(_("committing files:\n"))
2045 self.ui.note(_("committing files:\n"))
2043 for f in sorted(ctx.modified() + ctx.added()):
2046 for f in sorted(ctx.modified() + ctx.added()):
2044 self.ui.note(f + "\n")
2047 self.ui.note(f + "\n")
2045 try:
2048 try:
2046 fctx = ctx[f]
2049 fctx = ctx[f]
2047 if fctx is None:
2050 if fctx is None:
2048 removed.append(f)
2051 removed.append(f)
2049 else:
2052 else:
2050 added.append(f)
2053 added.append(f)
2051 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2054 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2052 trp, changed)
2055 trp, changed)
2053 m.setflag(f, fctx.flags())
2056 m.setflag(f, fctx.flags())
2054 except OSError as inst:
2057 except OSError as inst:
2055 self.ui.warn(_("trouble committing %s!\n") % f)
2058 self.ui.warn(_("trouble committing %s!\n") % f)
2056 raise
2059 raise
2057 except IOError as inst:
2060 except IOError as inst:
2058 errcode = getattr(inst, 'errno', errno.ENOENT)
2061 errcode = getattr(inst, 'errno', errno.ENOENT)
2059 if error or errcode and errcode != errno.ENOENT:
2062 if error or errcode and errcode != errno.ENOENT:
2060 self.ui.warn(_("trouble committing %s!\n") % f)
2063 self.ui.warn(_("trouble committing %s!\n") % f)
2061 raise
2064 raise
2062
2065
2063 # update manifest
2066 # update manifest
2064 self.ui.note(_("committing manifest\n"))
2067 self.ui.note(_("committing manifest\n"))
2065 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2068 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2066 drop = [f for f in removed if f in m]
2069 drop = [f for f in removed if f in m]
2067 for f in drop:
2070 for f in drop:
2068 del m[f]
2071 del m[f]
2069 mn = mctx.write(trp, linkrev,
2072 mn = mctx.write(trp, linkrev,
2070 p1.manifestnode(), p2.manifestnode(),
2073 p1.manifestnode(), p2.manifestnode(),
2071 added, drop)
2074 added, drop)
2072 files = changed + removed
2075 files = changed + removed
2073 else:
2076 else:
2074 mn = p1.manifestnode()
2077 mn = p1.manifestnode()
2075 files = []
2078 files = []
2076
2079
2077 # update changelog
2080 # update changelog
2078 self.ui.note(_("committing changelog\n"))
2081 self.ui.note(_("committing changelog\n"))
2079 self.changelog.delayupdate(tr)
2082 self.changelog.delayupdate(tr)
2080 n = self.changelog.add(mn, files, ctx.description(),
2083 n = self.changelog.add(mn, files, ctx.description(),
2081 trp, p1.node(), p2.node(),
2084 trp, p1.node(), p2.node(),
2082 user, ctx.date(), ctx.extra().copy())
2085 user, ctx.date(), ctx.extra().copy())
2083 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2086 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2084 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2087 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2085 parent2=xp2)
2088 parent2=xp2)
2086 # set the new commit is proper phase
2089 # set the new commit is proper phase
2087 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2090 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2088 if targetphase:
2091 if targetphase:
2089 # retract boundary do not alter parent changeset.
2092 # retract boundary do not alter parent changeset.
2090 # if a parent have higher the resulting phase will
2093 # if a parent have higher the resulting phase will
2091 # be compliant anyway
2094 # be compliant anyway
2092 #
2095 #
2093 # if minimal phase was 0 we don't need to retract anything
2096 # if minimal phase was 0 we don't need to retract anything
2094 phases.registernew(self, tr, targetphase, [n])
2097 phases.registernew(self, tr, targetphase, [n])
2095 tr.close()
2098 tr.close()
2096 return n
2099 return n
2097 finally:
2100 finally:
2098 if tr:
2101 if tr:
2099 tr.release()
2102 tr.release()
2100 lock.release()
2103 lock.release()
2101
2104
2102 @unfilteredmethod
2105 @unfilteredmethod
2103 def destroying(self):
2106 def destroying(self):
2104 '''Inform the repository that nodes are about to be destroyed.
2107 '''Inform the repository that nodes are about to be destroyed.
2105 Intended for use by strip and rollback, so there's a common
2108 Intended for use by strip and rollback, so there's a common
2106 place for anything that has to be done before destroying history.
2109 place for anything that has to be done before destroying history.
2107
2110
2108 This is mostly useful for saving state that is in memory and waiting
2111 This is mostly useful for saving state that is in memory and waiting
2109 to be flushed when the current lock is released. Because a call to
2112 to be flushed when the current lock is released. Because a call to
2110 destroyed is imminent, the repo will be invalidated causing those
2113 destroyed is imminent, the repo will be invalidated causing those
2111 changes to stay in memory (waiting for the next unlock), or vanish
2114 changes to stay in memory (waiting for the next unlock), or vanish
2112 completely.
2115 completely.
2113 '''
2116 '''
2114 # When using the same lock to commit and strip, the phasecache is left
2117 # When using the same lock to commit and strip, the phasecache is left
2115 # dirty after committing. Then when we strip, the repo is invalidated,
2118 # dirty after committing. Then when we strip, the repo is invalidated,
2116 # causing those changes to disappear.
2119 # causing those changes to disappear.
2117 if '_phasecache' in vars(self):
2120 if '_phasecache' in vars(self):
2118 self._phasecache.write()
2121 self._phasecache.write()
2119
2122
2120 @unfilteredmethod
2123 @unfilteredmethod
2121 def destroyed(self):
2124 def destroyed(self):
2122 '''Inform the repository that nodes have been destroyed.
2125 '''Inform the repository that nodes have been destroyed.
2123 Intended for use by strip and rollback, so there's a common
2126 Intended for use by strip and rollback, so there's a common
2124 place for anything that has to be done after destroying history.
2127 place for anything that has to be done after destroying history.
2125 '''
2128 '''
2126 # When one tries to:
2129 # When one tries to:
2127 # 1) destroy nodes thus calling this method (e.g. strip)
2130 # 1) destroy nodes thus calling this method (e.g. strip)
2128 # 2) use phasecache somewhere (e.g. commit)
2131 # 2) use phasecache somewhere (e.g. commit)
2129 #
2132 #
2130 # then 2) will fail because the phasecache contains nodes that were
2133 # then 2) will fail because the phasecache contains nodes that were
2131 # removed. We can either remove phasecache from the filecache,
2134 # removed. We can either remove phasecache from the filecache,
2132 # causing it to reload next time it is accessed, or simply filter
2135 # causing it to reload next time it is accessed, or simply filter
2133 # the removed nodes now and write the updated cache.
2136 # the removed nodes now and write the updated cache.
2134 self._phasecache.filterunknown(self)
2137 self._phasecache.filterunknown(self)
2135 self._phasecache.write()
2138 self._phasecache.write()
2136
2139
2137 # refresh all repository caches
2140 # refresh all repository caches
2138 self.updatecaches()
2141 self.updatecaches()
2139
2142
2140 # Ensure the persistent tag cache is updated. Doing it now
2143 # Ensure the persistent tag cache is updated. Doing it now
2141 # means that the tag cache only has to worry about destroyed
2144 # means that the tag cache only has to worry about destroyed
2142 # heads immediately after a strip/rollback. That in turn
2145 # heads immediately after a strip/rollback. That in turn
2143 # guarantees that "cachetip == currenttip" (comparing both rev
2146 # guarantees that "cachetip == currenttip" (comparing both rev
2144 # and node) always means no nodes have been added or destroyed.
2147 # and node) always means no nodes have been added or destroyed.
2145
2148
2146 # XXX this is suboptimal when qrefresh'ing: we strip the current
2149 # XXX this is suboptimal when qrefresh'ing: we strip the current
2147 # head, refresh the tag cache, then immediately add a new head.
2150 # head, refresh the tag cache, then immediately add a new head.
2148 # But I think doing it this way is necessary for the "instant
2151 # But I think doing it this way is necessary for the "instant
2149 # tag cache retrieval" case to work.
2152 # tag cache retrieval" case to work.
2150 self.invalidate()
2153 self.invalidate()
2151
2154
2152 def status(self, node1='.', node2=None, match=None,
2155 def status(self, node1='.', node2=None, match=None,
2153 ignored=False, clean=False, unknown=False,
2156 ignored=False, clean=False, unknown=False,
2154 listsubrepos=False):
2157 listsubrepos=False):
2155 '''a convenience method that calls node1.status(node2)'''
2158 '''a convenience method that calls node1.status(node2)'''
2156 return self[node1].status(node2, match, ignored, clean, unknown,
2159 return self[node1].status(node2, match, ignored, clean, unknown,
2157 listsubrepos)
2160 listsubrepos)
2158
2161
2159 def addpostdsstatus(self, ps):
2162 def addpostdsstatus(self, ps):
2160 """Add a callback to run within the wlock, at the point at which status
2163 """Add a callback to run within the wlock, at the point at which status
2161 fixups happen.
2164 fixups happen.
2162
2165
2163 On status completion, callback(wctx, status) will be called with the
2166 On status completion, callback(wctx, status) will be called with the
2164 wlock held, unless the dirstate has changed from underneath or the wlock
2167 wlock held, unless the dirstate has changed from underneath or the wlock
2165 couldn't be grabbed.
2168 couldn't be grabbed.
2166
2169
2167 Callbacks should not capture and use a cached copy of the dirstate --
2170 Callbacks should not capture and use a cached copy of the dirstate --
2168 it might change in the meanwhile. Instead, they should access the
2171 it might change in the meanwhile. Instead, they should access the
2169 dirstate via wctx.repo().dirstate.
2172 dirstate via wctx.repo().dirstate.
2170
2173
2171 This list is emptied out after each status run -- extensions should
2174 This list is emptied out after each status run -- extensions should
2172 make sure it adds to this list each time dirstate.status is called.
2175 make sure it adds to this list each time dirstate.status is called.
2173 Extensions should also make sure they don't call this for statuses
2176 Extensions should also make sure they don't call this for statuses
2174 that don't involve the dirstate.
2177 that don't involve the dirstate.
2175 """
2178 """
2176
2179
2177 # The list is located here for uniqueness reasons -- it is actually
2180 # The list is located here for uniqueness reasons -- it is actually
2178 # managed by the workingctx, but that isn't unique per-repo.
2181 # managed by the workingctx, but that isn't unique per-repo.
2179 self._postdsstatus.append(ps)
2182 self._postdsstatus.append(ps)
2180
2183
2181 def postdsstatus(self):
2184 def postdsstatus(self):
2182 """Used by workingctx to get the list of post-dirstate-status hooks."""
2185 """Used by workingctx to get the list of post-dirstate-status hooks."""
2183 return self._postdsstatus
2186 return self._postdsstatus
2184
2187
2185 def clearpostdsstatus(self):
2188 def clearpostdsstatus(self):
2186 """Used by workingctx to clear post-dirstate-status hooks."""
2189 """Used by workingctx to clear post-dirstate-status hooks."""
2187 del self._postdsstatus[:]
2190 del self._postdsstatus[:]
2188
2191
2189 def heads(self, start=None):
2192 def heads(self, start=None):
2190 if start is None:
2193 if start is None:
2191 cl = self.changelog
2194 cl = self.changelog
2192 headrevs = reversed(cl.headrevs())
2195 headrevs = reversed(cl.headrevs())
2193 return [cl.node(rev) for rev in headrevs]
2196 return [cl.node(rev) for rev in headrevs]
2194
2197
2195 heads = self.changelog.heads(start)
2198 heads = self.changelog.heads(start)
2196 # sort the output in rev descending order
2199 # sort the output in rev descending order
2197 return sorted(heads, key=self.changelog.rev, reverse=True)
2200 return sorted(heads, key=self.changelog.rev, reverse=True)
2198
2201
2199 def branchheads(self, branch=None, start=None, closed=False):
2202 def branchheads(self, branch=None, start=None, closed=False):
2200 '''return a (possibly filtered) list of heads for the given branch
2203 '''return a (possibly filtered) list of heads for the given branch
2201
2204
2202 Heads are returned in topological order, from newest to oldest.
2205 Heads are returned in topological order, from newest to oldest.
2203 If branch is None, use the dirstate branch.
2206 If branch is None, use the dirstate branch.
2204 If start is not None, return only heads reachable from start.
2207 If start is not None, return only heads reachable from start.
2205 If closed is True, return heads that are marked as closed as well.
2208 If closed is True, return heads that are marked as closed as well.
2206 '''
2209 '''
2207 if branch is None:
2210 if branch is None:
2208 branch = self[None].branch()
2211 branch = self[None].branch()
2209 branches = self.branchmap()
2212 branches = self.branchmap()
2210 if branch not in branches:
2213 if branch not in branches:
2211 return []
2214 return []
2212 # the cache returns heads ordered lowest to highest
2215 # the cache returns heads ordered lowest to highest
2213 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2216 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2214 if start is not None:
2217 if start is not None:
2215 # filter out the heads that cannot be reached from startrev
2218 # filter out the heads that cannot be reached from startrev
2216 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2219 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2217 bheads = [h for h in bheads if h in fbheads]
2220 bheads = [h for h in bheads if h in fbheads]
2218 return bheads
2221 return bheads
2219
2222
2220 def branches(self, nodes):
2223 def branches(self, nodes):
2221 if not nodes:
2224 if not nodes:
2222 nodes = [self.changelog.tip()]
2225 nodes = [self.changelog.tip()]
2223 b = []
2226 b = []
2224 for n in nodes:
2227 for n in nodes:
2225 t = n
2228 t = n
2226 while True:
2229 while True:
2227 p = self.changelog.parents(n)
2230 p = self.changelog.parents(n)
2228 if p[1] != nullid or p[0] == nullid:
2231 if p[1] != nullid or p[0] == nullid:
2229 b.append((t, n, p[0], p[1]))
2232 b.append((t, n, p[0], p[1]))
2230 break
2233 break
2231 n = p[0]
2234 n = p[0]
2232 return b
2235 return b
2233
2236
2234 def between(self, pairs):
2237 def between(self, pairs):
2235 r = []
2238 r = []
2236
2239
2237 for top, bottom in pairs:
2240 for top, bottom in pairs:
2238 n, l, i = top, [], 0
2241 n, l, i = top, [], 0
2239 f = 1
2242 f = 1
2240
2243
2241 while n != bottom and n != nullid:
2244 while n != bottom and n != nullid:
2242 p = self.changelog.parents(n)[0]
2245 p = self.changelog.parents(n)[0]
2243 if i == f:
2246 if i == f:
2244 l.append(n)
2247 l.append(n)
2245 f = f * 2
2248 f = f * 2
2246 n = p
2249 n = p
2247 i += 1
2250 i += 1
2248
2251
2249 r.append(l)
2252 r.append(l)
2250
2253
2251 return r
2254 return r
2252
2255
2253 def checkpush(self, pushop):
2256 def checkpush(self, pushop):
2254 """Extensions can override this function if additional checks have
2257 """Extensions can override this function if additional checks have
2255 to be performed before pushing, or call it if they override push
2258 to be performed before pushing, or call it if they override push
2256 command.
2259 command.
2257 """
2260 """
2258
2261
2259 @unfilteredpropertycache
2262 @unfilteredpropertycache
2260 def prepushoutgoinghooks(self):
2263 def prepushoutgoinghooks(self):
2261 """Return util.hooks consists of a pushop with repo, remote, outgoing
2264 """Return util.hooks consists of a pushop with repo, remote, outgoing
2262 methods, which are called before pushing changesets.
2265 methods, which are called before pushing changesets.
2263 """
2266 """
2264 return util.hooks()
2267 return util.hooks()
2265
2268
2266 def pushkey(self, namespace, key, old, new):
2269 def pushkey(self, namespace, key, old, new):
2267 try:
2270 try:
2268 tr = self.currenttransaction()
2271 tr = self.currenttransaction()
2269 hookargs = {}
2272 hookargs = {}
2270 if tr is not None:
2273 if tr is not None:
2271 hookargs.update(tr.hookargs)
2274 hookargs.update(tr.hookargs)
2272 hookargs = pycompat.strkwargs(hookargs)
2275 hookargs = pycompat.strkwargs(hookargs)
2273 hookargs[r'namespace'] = namespace
2276 hookargs[r'namespace'] = namespace
2274 hookargs[r'key'] = key
2277 hookargs[r'key'] = key
2275 hookargs[r'old'] = old
2278 hookargs[r'old'] = old
2276 hookargs[r'new'] = new
2279 hookargs[r'new'] = new
2277 self.hook('prepushkey', throw=True, **hookargs)
2280 self.hook('prepushkey', throw=True, **hookargs)
2278 except error.HookAbort as exc:
2281 except error.HookAbort as exc:
2279 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2282 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2280 if exc.hint:
2283 if exc.hint:
2281 self.ui.write_err(_("(%s)\n") % exc.hint)
2284 self.ui.write_err(_("(%s)\n") % exc.hint)
2282 return False
2285 return False
2283 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2286 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2284 ret = pushkey.push(self, namespace, key, old, new)
2287 ret = pushkey.push(self, namespace, key, old, new)
2285 def runhook():
2288 def runhook():
2286 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2289 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2287 ret=ret)
2290 ret=ret)
2288 self._afterlock(runhook)
2291 self._afterlock(runhook)
2289 return ret
2292 return ret
2290
2293
2291 def listkeys(self, namespace):
2294 def listkeys(self, namespace):
2292 self.hook('prelistkeys', throw=True, namespace=namespace)
2295 self.hook('prelistkeys', throw=True, namespace=namespace)
2293 self.ui.debug('listing keys for "%s"\n' % namespace)
2296 self.ui.debug('listing keys for "%s"\n' % namespace)
2294 values = pushkey.list(self, namespace)
2297 values = pushkey.list(self, namespace)
2295 self.hook('listkeys', namespace=namespace, values=values)
2298 self.hook('listkeys', namespace=namespace, values=values)
2296 return values
2299 return values
2297
2300
2298 def debugwireargs(self, one, two, three=None, four=None, five=None):
2301 def debugwireargs(self, one, two, three=None, four=None, five=None):
2299 '''used to test argument passing over the wire'''
2302 '''used to test argument passing over the wire'''
2300 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2303 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2301 pycompat.bytestr(four),
2304 pycompat.bytestr(four),
2302 pycompat.bytestr(five))
2305 pycompat.bytestr(five))
2303
2306
2304 def savecommitmessage(self, text):
2307 def savecommitmessage(self, text):
2305 fp = self.vfs('last-message.txt', 'wb')
2308 fp = self.vfs('last-message.txt', 'wb')
2306 try:
2309 try:
2307 fp.write(text)
2310 fp.write(text)
2308 finally:
2311 finally:
2309 fp.close()
2312 fp.close()
2310 return self.pathto(fp.name[len(self.root) + 1:])
2313 return self.pathto(fp.name[len(self.root) + 1:])
2311
2314
2312 # used to avoid circular references so destructors work
2315 # used to avoid circular references so destructors work
2313 def aftertrans(files):
2316 def aftertrans(files):
2314 renamefiles = [tuple(t) for t in files]
2317 renamefiles = [tuple(t) for t in files]
2315 def a():
2318 def a():
2316 for vfs, src, dest in renamefiles:
2319 for vfs, src, dest in renamefiles:
2317 # if src and dest refer to a same file, vfs.rename is a no-op,
2320 # if src and dest refer to a same file, vfs.rename is a no-op,
2318 # leaving both src and dest on disk. delete dest to make sure
2321 # leaving both src and dest on disk. delete dest to make sure
2319 # the rename couldn't be such a no-op.
2322 # the rename couldn't be such a no-op.
2320 vfs.tryunlink(dest)
2323 vfs.tryunlink(dest)
2321 try:
2324 try:
2322 vfs.rename(src, dest)
2325 vfs.rename(src, dest)
2323 except OSError: # journal file does not yet exist
2326 except OSError: # journal file does not yet exist
2324 pass
2327 pass
2325 return a
2328 return a
2326
2329
2327 def undoname(fn):
2330 def undoname(fn):
2328 base, name = os.path.split(fn)
2331 base, name = os.path.split(fn)
2329 assert name.startswith('journal')
2332 assert name.startswith('journal')
2330 return os.path.join(base, name.replace('journal', 'undo', 1))
2333 return os.path.join(base, name.replace('journal', 'undo', 1))
2331
2334
2332 def instance(ui, path, create, intents=None):
2335 def instance(ui, path, create, intents=None):
2333 return localrepository(ui, util.urllocalpath(path), create,
2336 return localrepository(ui, util.urllocalpath(path), create,
2334 intents=intents)
2337 intents=intents)
2335
2338
2336 def islocal(path):
2339 def islocal(path):
2337 return True
2340 return True
2338
2341
2339 def newreporequirements(repo):
2342 def newreporequirements(repo):
2340 """Determine the set of requirements for a new local repository.
2343 """Determine the set of requirements for a new local repository.
2341
2344
2342 Extensions can wrap this function to specify custom requirements for
2345 Extensions can wrap this function to specify custom requirements for
2343 new repositories.
2346 new repositories.
2344 """
2347 """
2345 ui = repo.ui
2348 ui = repo.ui
2346 requirements = {'revlogv1'}
2349 requirements = {'revlogv1'}
2347 if ui.configbool('format', 'usestore'):
2350 if ui.configbool('format', 'usestore'):
2348 requirements.add('store')
2351 requirements.add('store')
2349 if ui.configbool('format', 'usefncache'):
2352 if ui.configbool('format', 'usefncache'):
2350 requirements.add('fncache')
2353 requirements.add('fncache')
2351 if ui.configbool('format', 'dotencode'):
2354 if ui.configbool('format', 'dotencode'):
2352 requirements.add('dotencode')
2355 requirements.add('dotencode')
2353
2356
2354 compengine = ui.config('experimental', 'format.compression')
2357 compengine = ui.config('experimental', 'format.compression')
2355 if compengine not in util.compengines:
2358 if compengine not in util.compengines:
2356 raise error.Abort(_('compression engine %s defined by '
2359 raise error.Abort(_('compression engine %s defined by '
2357 'experimental.format.compression not available') %
2360 'experimental.format.compression not available') %
2358 compengine,
2361 compengine,
2359 hint=_('run "hg debuginstall" to list available '
2362 hint=_('run "hg debuginstall" to list available '
2360 'compression engines'))
2363 'compression engines'))
2361
2364
2362 # zlib is the historical default and doesn't need an explicit requirement.
2365 # zlib is the historical default and doesn't need an explicit requirement.
2363 if compengine != 'zlib':
2366 if compengine != 'zlib':
2364 requirements.add('exp-compression-%s' % compengine)
2367 requirements.add('exp-compression-%s' % compengine)
2365
2368
2366 if scmutil.gdinitconfig(ui):
2369 if scmutil.gdinitconfig(ui):
2367 requirements.add('generaldelta')
2370 requirements.add('generaldelta')
2368 if ui.configbool('experimental', 'treemanifest'):
2371 if ui.configbool('experimental', 'treemanifest'):
2369 requirements.add('treemanifest')
2372 requirements.add('treemanifest')
2370
2373
2371 revlogv2 = ui.config('experimental', 'revlogv2')
2374 revlogv2 = ui.config('experimental', 'revlogv2')
2372 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2375 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2373 requirements.remove('revlogv1')
2376 requirements.remove('revlogv1')
2374 # generaldelta is implied by revlogv2.
2377 # generaldelta is implied by revlogv2.
2375 requirements.discard('generaldelta')
2378 requirements.discard('generaldelta')
2376 requirements.add(REVLOGV2_REQUIREMENT)
2379 requirements.add(REVLOGV2_REQUIREMENT)
2377
2380
2378 return requirements
2381 return requirements
@@ -1,392 +1,397 b''
1 # lock.py - simple advisory locking scheme for mercurial
1 # lock.py - simple advisory locking scheme for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import errno
11 import errno
12 import os
12 import os
13 import signal
13 import signal
14 import socket
14 import socket
15 import time
15 import time
16 import warnings
16 import warnings
17
17
18 from .i18n import _
18 from .i18n import _
19
19
20 from . import (
20 from . import (
21 encoding,
21 encoding,
22 error,
22 error,
23 pycompat,
23 pycompat,
24 util,
24 )
25 )
25
26
26 from .utils import (
27 from .utils import (
27 procutil,
28 procutil,
28 )
29 )
29
30
30 def _getlockprefix():
31 def _getlockprefix():
31 """Return a string which is used to differentiate pid namespaces
32 """Return a string which is used to differentiate pid namespaces
32
33
33 It's useful to detect "dead" processes and remove stale locks with
34 It's useful to detect "dead" processes and remove stale locks with
34 confidence. Typically it's just hostname. On modern linux, we include an
35 confidence. Typically it's just hostname. On modern linux, we include an
35 extra Linux-specific pid namespace identifier.
36 extra Linux-specific pid namespace identifier.
36 """
37 """
37 result = encoding.strtolocal(socket.gethostname())
38 result = encoding.strtolocal(socket.gethostname())
38 if pycompat.sysplatform.startswith('linux'):
39 if pycompat.sysplatform.startswith('linux'):
39 try:
40 try:
40 result += '/%x' % os.stat('/proc/self/ns/pid').st_ino
41 result += '/%x' % os.stat('/proc/self/ns/pid').st_ino
41 except OSError as ex:
42 except OSError as ex:
42 if ex.errno not in (errno.ENOENT, errno.EACCES, errno.ENOTDIR):
43 if ex.errno not in (errno.ENOENT, errno.EACCES, errno.ENOTDIR):
43 raise
44 raise
44 return result
45 return result
45
46
46 @contextlib.contextmanager
47 @contextlib.contextmanager
47 def _delayedinterrupt():
48 def _delayedinterrupt():
48 """Block signal interrupt while doing something critical
49 """Block signal interrupt while doing something critical
49
50
50 This makes sure that the code block wrapped by this context manager won't
51 This makes sure that the code block wrapped by this context manager won't
51 be interrupted.
52 be interrupted.
52
53
53 For Windows developers: It appears not possible to guard time.sleep()
54 For Windows developers: It appears not possible to guard time.sleep()
54 from CTRL_C_EVENT, so please don't use time.sleep() to test if this is
55 from CTRL_C_EVENT, so please don't use time.sleep() to test if this is
55 working.
56 working.
56 """
57 """
57 assertedsigs = []
58 assertedsigs = []
58 blocked = False
59 blocked = False
59 orighandlers = {}
60 orighandlers = {}
60
61
61 def raiseinterrupt(num):
62 def raiseinterrupt(num):
62 if (num == getattr(signal, 'SIGINT', None) or
63 if (num == getattr(signal, 'SIGINT', None) or
63 num == getattr(signal, 'CTRL_C_EVENT', None)):
64 num == getattr(signal, 'CTRL_C_EVENT', None)):
64 raise KeyboardInterrupt
65 raise KeyboardInterrupt
65 else:
66 else:
66 raise error.SignalInterrupt
67 raise error.SignalInterrupt
67 def catchterm(num, frame):
68 def catchterm(num, frame):
68 if blocked:
69 if blocked:
69 assertedsigs.append(num)
70 assertedsigs.append(num)
70 else:
71 else:
71 raiseinterrupt(num)
72 raiseinterrupt(num)
72
73
73 try:
74 try:
74 # save handlers first so they can be restored even if a setup is
75 # save handlers first so they can be restored even if a setup is
75 # interrupted between signal.signal() and orighandlers[] =.
76 # interrupted between signal.signal() and orighandlers[] =.
76 for name in ['CTRL_C_EVENT', 'SIGINT', 'SIGBREAK', 'SIGHUP', 'SIGTERM']:
77 for name in ['CTRL_C_EVENT', 'SIGINT', 'SIGBREAK', 'SIGHUP', 'SIGTERM']:
77 num = getattr(signal, name, None)
78 num = getattr(signal, name, None)
78 if num and num not in orighandlers:
79 if num and num not in orighandlers:
79 orighandlers[num] = signal.getsignal(num)
80 orighandlers[num] = signal.getsignal(num)
80 try:
81 try:
81 for num in orighandlers:
82 for num in orighandlers:
82 signal.signal(num, catchterm)
83 signal.signal(num, catchterm)
83 except ValueError:
84 except ValueError:
84 pass # in a thread? no luck
85 pass # in a thread? no luck
85
86
86 blocked = True
87 blocked = True
87 yield
88 yield
88 finally:
89 finally:
89 # no simple way to reliably restore all signal handlers because
90 # no simple way to reliably restore all signal handlers because
90 # any loops, recursive function calls, except blocks, etc. can be
91 # any loops, recursive function calls, except blocks, etc. can be
91 # interrupted. so instead, make catchterm() raise interrupt.
92 # interrupted. so instead, make catchterm() raise interrupt.
92 blocked = False
93 blocked = False
93 try:
94 try:
94 for num, handler in orighandlers.items():
95 for num, handler in orighandlers.items():
95 signal.signal(num, handler)
96 signal.signal(num, handler)
96 except ValueError:
97 except ValueError:
97 pass # in a thread?
98 pass # in a thread?
98
99
99 # re-raise interrupt exception if any, which may be shadowed by a new
100 # re-raise interrupt exception if any, which may be shadowed by a new
100 # interrupt occurred while re-raising the first one
101 # interrupt occurred while re-raising the first one
101 if assertedsigs:
102 if assertedsigs:
102 raiseinterrupt(assertedsigs[0])
103 raiseinterrupt(assertedsigs[0])
103
104
104 def trylock(ui, vfs, lockname, timeout, warntimeout, *args, **kwargs):
105 def trylock(ui, vfs, lockname, timeout, warntimeout, *args, **kwargs):
105 """return an acquired lock or raise an a LockHeld exception
106 """return an acquired lock or raise an a LockHeld exception
106
107
107 This function is responsible to issue warnings and or debug messages about
108 This function is responsible to issue warnings and or debug messages about
108 the held lock while trying to acquires it."""
109 the held lock while trying to acquires it."""
109
110
110 def printwarning(printer, locker):
111 def printwarning(printer, locker):
111 """issue the usual "waiting on lock" message through any channel"""
112 """issue the usual "waiting on lock" message through any channel"""
112 # show more details for new-style locks
113 # show more details for new-style locks
113 if ':' in locker:
114 if ':' in locker:
114 host, pid = locker.split(":", 1)
115 host, pid = locker.split(":", 1)
115 msg = (_("waiting for lock on %s held by process %r on host %r\n")
116 msg = (_("waiting for lock on %s held by process %r on host %r\n")
116 % (pycompat.bytestr(l.desc), pycompat.bytestr(pid),
117 % (pycompat.bytestr(l.desc), pycompat.bytestr(pid),
117 pycompat.bytestr(host)))
118 pycompat.bytestr(host)))
118 else:
119 else:
119 msg = (_("waiting for lock on %s held by %r\n")
120 msg = (_("waiting for lock on %s held by %r\n")
120 % (l.desc, pycompat.bytestr(locker)))
121 % (l.desc, pycompat.bytestr(locker)))
121 printer(msg)
122 printer(msg)
122
123
123 l = lock(vfs, lockname, 0, *args, dolock=False, **kwargs)
124 l = lock(vfs, lockname, 0, *args, dolock=False, **kwargs)
124
125
125 debugidx = 0 if (warntimeout and timeout) else -1
126 debugidx = 0 if (warntimeout and timeout) else -1
126 warningidx = 0
127 warningidx = 0
127 if not timeout:
128 if not timeout:
128 warningidx = -1
129 warningidx = -1
129 elif warntimeout:
130 elif warntimeout:
130 warningidx = warntimeout
131 warningidx = warntimeout
131
132
132 delay = 0
133 delay = 0
133 while True:
134 while True:
134 try:
135 try:
135 l._trylock()
136 l._trylock()
136 break
137 break
137 except error.LockHeld as inst:
138 except error.LockHeld as inst:
138 if delay == debugidx:
139 if delay == debugidx:
139 printwarning(ui.debug, inst.locker)
140 printwarning(ui.debug, inst.locker)
140 if delay == warningidx:
141 if delay == warningidx:
141 printwarning(ui.warn, inst.locker)
142 printwarning(ui.warn, inst.locker)
142 if timeout <= delay:
143 if timeout <= delay:
143 raise error.LockHeld(errno.ETIMEDOUT, inst.filename,
144 raise error.LockHeld(errno.ETIMEDOUT, inst.filename,
144 l.desc, inst.locker)
145 l.desc, inst.locker)
145 time.sleep(1)
146 time.sleep(1)
146 delay += 1
147 delay += 1
147
148
148 l.delay = delay
149 l.delay = delay
149 if l.delay:
150 if l.delay:
150 if 0 <= warningidx <= l.delay:
151 if 0 <= warningidx <= l.delay:
151 ui.warn(_("got lock after %d seconds\n") % l.delay)
152 ui.warn(_("got lock after %d seconds\n") % l.delay)
152 else:
153 else:
153 ui.debug("got lock after %d seconds\n" % l.delay)
154 ui.debug("got lock after %d seconds\n" % l.delay)
154 if l.acquirefn:
155 if l.acquirefn:
155 l.acquirefn()
156 l.acquirefn()
156 return l
157 return l
157
158
158 class lock(object):
159 class lock(object):
159 '''An advisory lock held by one process to control access to a set
160 '''An advisory lock held by one process to control access to a set
160 of files. Non-cooperating processes or incorrectly written scripts
161 of files. Non-cooperating processes or incorrectly written scripts
161 can ignore Mercurial's locking scheme and stomp all over the
162 can ignore Mercurial's locking scheme and stomp all over the
162 repository, so don't do that.
163 repository, so don't do that.
163
164
164 Typically used via localrepository.lock() to lock the repository
165 Typically used via localrepository.lock() to lock the repository
165 store (.hg/store/) or localrepository.wlock() to lock everything
166 store (.hg/store/) or localrepository.wlock() to lock everything
166 else under .hg/.'''
167 else under .hg/.'''
167
168
168 # lock is symlink on platforms that support it, file on others.
169 # lock is symlink on platforms that support it, file on others.
169
170
170 # symlink is used because create of directory entry and contents
171 # symlink is used because create of directory entry and contents
171 # are atomic even over nfs.
172 # are atomic even over nfs.
172
173
173 # old-style lock: symlink to pid
174 # old-style lock: symlink to pid
174 # new-style lock: symlink to hostname:pid
175 # new-style lock: symlink to hostname:pid
175
176
176 _host = None
177 _host = None
177
178
178 def __init__(self, vfs, fname, timeout=-1, releasefn=None, acquirefn=None,
179 def __init__(self, vfs, fname, timeout=-1, releasefn=None, acquirefn=None,
179 desc=None, inheritchecker=None, parentlock=None,
180 desc=None, inheritchecker=None, parentlock=None,
180 dolock=True):
181 signalsafe=True, dolock=True):
181 self.vfs = vfs
182 self.vfs = vfs
182 self.f = fname
183 self.f = fname
183 self.held = 0
184 self.held = 0
184 self.timeout = timeout
185 self.timeout = timeout
185 self.releasefn = releasefn
186 self.releasefn = releasefn
186 self.acquirefn = acquirefn
187 self.acquirefn = acquirefn
187 self.desc = desc
188 self.desc = desc
188 self._inheritchecker = inheritchecker
189 self._inheritchecker = inheritchecker
189 self.parentlock = parentlock
190 self.parentlock = parentlock
190 self._parentheld = False
191 self._parentheld = False
191 self._inherited = False
192 self._inherited = False
193 if signalsafe:
194 self._maybedelayedinterrupt = _delayedinterrupt
195 else:
196 self._maybedelayedinterrupt = util.nullcontextmanager
192 self.postrelease = []
197 self.postrelease = []
193 self.pid = self._getpid()
198 self.pid = self._getpid()
194 if dolock:
199 if dolock:
195 self.delay = self.lock()
200 self.delay = self.lock()
196 if self.acquirefn:
201 if self.acquirefn:
197 self.acquirefn()
202 self.acquirefn()
198
203
199 def __enter__(self):
204 def __enter__(self):
200 return self
205 return self
201
206
202 def __exit__(self, exc_type, exc_value, exc_tb):
207 def __exit__(self, exc_type, exc_value, exc_tb):
203 self.release()
208 self.release()
204
209
205 def __del__(self):
210 def __del__(self):
206 if self.held:
211 if self.held:
207 warnings.warn("use lock.release instead of del lock",
212 warnings.warn("use lock.release instead of del lock",
208 category=DeprecationWarning,
213 category=DeprecationWarning,
209 stacklevel=2)
214 stacklevel=2)
210
215
211 # ensure the lock will be removed
216 # ensure the lock will be removed
212 # even if recursive locking did occur
217 # even if recursive locking did occur
213 self.held = 1
218 self.held = 1
214
219
215 self.release()
220 self.release()
216
221
217 def _getpid(self):
222 def _getpid(self):
218 # wrapper around procutil.getpid() to make testing easier
223 # wrapper around procutil.getpid() to make testing easier
219 return procutil.getpid()
224 return procutil.getpid()
220
225
221 def lock(self):
226 def lock(self):
222 timeout = self.timeout
227 timeout = self.timeout
223 while True:
228 while True:
224 try:
229 try:
225 self._trylock()
230 self._trylock()
226 return self.timeout - timeout
231 return self.timeout - timeout
227 except error.LockHeld as inst:
232 except error.LockHeld as inst:
228 if timeout != 0:
233 if timeout != 0:
229 time.sleep(1)
234 time.sleep(1)
230 if timeout > 0:
235 if timeout > 0:
231 timeout -= 1
236 timeout -= 1
232 continue
237 continue
233 raise error.LockHeld(errno.ETIMEDOUT, inst.filename, self.desc,
238 raise error.LockHeld(errno.ETIMEDOUT, inst.filename, self.desc,
234 inst.locker)
239 inst.locker)
235
240
236 def _trylock(self):
241 def _trylock(self):
237 if self.held:
242 if self.held:
238 self.held += 1
243 self.held += 1
239 return
244 return
240 if lock._host is None:
245 if lock._host is None:
241 lock._host = _getlockprefix()
246 lock._host = _getlockprefix()
242 lockname = '%s:%d' % (lock._host, self.pid)
247 lockname = '%s:%d' % (lock._host, self.pid)
243 retry = 5
248 retry = 5
244 while not self.held and retry:
249 while not self.held and retry:
245 retry -= 1
250 retry -= 1
246 try:
251 try:
247 with _delayedinterrupt():
252 with self._maybedelayedinterrupt():
248 self.vfs.makelock(lockname, self.f)
253 self.vfs.makelock(lockname, self.f)
249 self.held = 1
254 self.held = 1
250 except (OSError, IOError) as why:
255 except (OSError, IOError) as why:
251 if why.errno == errno.EEXIST:
256 if why.errno == errno.EEXIST:
252 locker = self._readlock()
257 locker = self._readlock()
253 if locker is None:
258 if locker is None:
254 continue
259 continue
255
260
256 # special case where a parent process holds the lock -- this
261 # special case where a parent process holds the lock -- this
257 # is different from the pid being different because we do
262 # is different from the pid being different because we do
258 # want the unlock and postrelease functions to be called,
263 # want the unlock and postrelease functions to be called,
259 # but the lockfile to not be removed.
264 # but the lockfile to not be removed.
260 if locker == self.parentlock:
265 if locker == self.parentlock:
261 self._parentheld = True
266 self._parentheld = True
262 self.held = 1
267 self.held = 1
263 return
268 return
264 locker = self._testlock(locker)
269 locker = self._testlock(locker)
265 if locker is not None:
270 if locker is not None:
266 raise error.LockHeld(errno.EAGAIN,
271 raise error.LockHeld(errno.EAGAIN,
267 self.vfs.join(self.f), self.desc,
272 self.vfs.join(self.f), self.desc,
268 locker)
273 locker)
269 else:
274 else:
270 raise error.LockUnavailable(why.errno, why.strerror,
275 raise error.LockUnavailable(why.errno, why.strerror,
271 why.filename, self.desc)
276 why.filename, self.desc)
272
277
273 if not self.held:
278 if not self.held:
274 # use empty locker to mean "busy for frequent lock/unlock
279 # use empty locker to mean "busy for frequent lock/unlock
275 # by many processes"
280 # by many processes"
276 raise error.LockHeld(errno.EAGAIN,
281 raise error.LockHeld(errno.EAGAIN,
277 self.vfs.join(self.f), self.desc, "")
282 self.vfs.join(self.f), self.desc, "")
278
283
279 def _readlock(self):
284 def _readlock(self):
280 """read lock and return its value
285 """read lock and return its value
281
286
282 Returns None if no lock exists, pid for old-style locks, and host:pid
287 Returns None if no lock exists, pid for old-style locks, and host:pid
283 for new-style locks.
288 for new-style locks.
284 """
289 """
285 try:
290 try:
286 return self.vfs.readlock(self.f)
291 return self.vfs.readlock(self.f)
287 except (OSError, IOError) as why:
292 except (OSError, IOError) as why:
288 if why.errno == errno.ENOENT:
293 if why.errno == errno.ENOENT:
289 return None
294 return None
290 raise
295 raise
291
296
292 def _testlock(self, locker):
297 def _testlock(self, locker):
293 if locker is None:
298 if locker is None:
294 return None
299 return None
295 try:
300 try:
296 host, pid = locker.split(":", 1)
301 host, pid = locker.split(":", 1)
297 except ValueError:
302 except ValueError:
298 return locker
303 return locker
299 if host != lock._host:
304 if host != lock._host:
300 return locker
305 return locker
301 try:
306 try:
302 pid = int(pid)
307 pid = int(pid)
303 except ValueError:
308 except ValueError:
304 return locker
309 return locker
305 if procutil.testpid(pid):
310 if procutil.testpid(pid):
306 return locker
311 return locker
307 # if locker dead, break lock. must do this with another lock
312 # if locker dead, break lock. must do this with another lock
308 # held, or can race and break valid lock.
313 # held, or can race and break valid lock.
309 try:
314 try:
310 l = lock(self.vfs, self.f + '.break', timeout=0)
315 l = lock(self.vfs, self.f + '.break', timeout=0)
311 self.vfs.unlink(self.f)
316 self.vfs.unlink(self.f)
312 l.release()
317 l.release()
313 except error.LockError:
318 except error.LockError:
314 return locker
319 return locker
315
320
316 def testlock(self):
321 def testlock(self):
317 """return id of locker if lock is valid, else None.
322 """return id of locker if lock is valid, else None.
318
323
319 If old-style lock, we cannot tell what machine locker is on.
324 If old-style lock, we cannot tell what machine locker is on.
320 with new-style lock, if locker is on this machine, we can
325 with new-style lock, if locker is on this machine, we can
321 see if locker is alive. If locker is on this machine but
326 see if locker is alive. If locker is on this machine but
322 not alive, we can safely break lock.
327 not alive, we can safely break lock.
323
328
324 The lock file is only deleted when None is returned.
329 The lock file is only deleted when None is returned.
325
330
326 """
331 """
327 locker = self._readlock()
332 locker = self._readlock()
328 return self._testlock(locker)
333 return self._testlock(locker)
329
334
330 @contextlib.contextmanager
335 @contextlib.contextmanager
331 def inherit(self):
336 def inherit(self):
332 """context for the lock to be inherited by a Mercurial subprocess.
337 """context for the lock to be inherited by a Mercurial subprocess.
333
338
334 Yields a string that will be recognized by the lock in the subprocess.
339 Yields a string that will be recognized by the lock in the subprocess.
335 Communicating this string to the subprocess needs to be done separately
340 Communicating this string to the subprocess needs to be done separately
336 -- typically by an environment variable.
341 -- typically by an environment variable.
337 """
342 """
338 if not self.held:
343 if not self.held:
339 raise error.LockInheritanceContractViolation(
344 raise error.LockInheritanceContractViolation(
340 'inherit can only be called while lock is held')
345 'inherit can only be called while lock is held')
341 if self._inherited:
346 if self._inherited:
342 raise error.LockInheritanceContractViolation(
347 raise error.LockInheritanceContractViolation(
343 'inherit cannot be called while lock is already inherited')
348 'inherit cannot be called while lock is already inherited')
344 if self._inheritchecker is not None:
349 if self._inheritchecker is not None:
345 self._inheritchecker()
350 self._inheritchecker()
346 if self.releasefn:
351 if self.releasefn:
347 self.releasefn()
352 self.releasefn()
348 if self._parentheld:
353 if self._parentheld:
349 lockname = self.parentlock
354 lockname = self.parentlock
350 else:
355 else:
351 lockname = b'%s:%d' % (lock._host, self.pid)
356 lockname = b'%s:%d' % (lock._host, self.pid)
352 self._inherited = True
357 self._inherited = True
353 try:
358 try:
354 yield lockname
359 yield lockname
355 finally:
360 finally:
356 if self.acquirefn:
361 if self.acquirefn:
357 self.acquirefn()
362 self.acquirefn()
358 self._inherited = False
363 self._inherited = False
359
364
360 def release(self):
365 def release(self):
361 """release the lock and execute callback function if any
366 """release the lock and execute callback function if any
362
367
363 If the lock has been acquired multiple times, the actual release is
368 If the lock has been acquired multiple times, the actual release is
364 delayed to the last release call."""
369 delayed to the last release call."""
365 if self.held > 1:
370 if self.held > 1:
366 self.held -= 1
371 self.held -= 1
367 elif self.held == 1:
372 elif self.held == 1:
368 self.held = 0
373 self.held = 0
369 if self._getpid() != self.pid:
374 if self._getpid() != self.pid:
370 # we forked, and are not the parent
375 # we forked, and are not the parent
371 return
376 return
372 try:
377 try:
373 if self.releasefn:
378 if self.releasefn:
374 self.releasefn()
379 self.releasefn()
375 finally:
380 finally:
376 if not self._parentheld:
381 if not self._parentheld:
377 try:
382 try:
378 self.vfs.unlink(self.f)
383 self.vfs.unlink(self.f)
379 except OSError:
384 except OSError:
380 pass
385 pass
381 # The postrelease functions typically assume the lock is not held
386 # The postrelease functions typically assume the lock is not held
382 # at all.
387 # at all.
383 if not self._parentheld:
388 if not self._parentheld:
384 for callback in self.postrelease:
389 for callback in self.postrelease:
385 callback()
390 callback()
386 # Prevent double usage and help clear cycles.
391 # Prevent double usage and help clear cycles.
387 self.postrelease = None
392 self.postrelease = None
388
393
389 def release(*locks):
394 def release(*locks):
390 for lock in locks:
395 for lock in locks:
391 if lock is not None:
396 if lock is not None:
392 lock.release()
397 lock.release()
General Comments 0
You need to be logged in to leave comments. Login now