##// END OF EJS Templates
compression: introduce a `storage.revlog.zlib.level` configuration...
marmoute -
r42210:1fac9b93 default
parent child Browse files
Show More
@@ -1,1467 +1,1470 b''
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11 import re
11 import re
12
12
13 from . import (
13 from . import (
14 encoding,
14 encoding,
15 error,
15 error,
16 )
16 )
17
17
def loadconfigtable(ui, extname, configtable):
    """update config item known to the ui with the extension ones"""
    # Walk sections in deterministic order so warnings are stable.
    for section, items in sorted(configtable.items()):
        registry = ui._knownconfig.setdefault(section, itemregister())
        # Keys present on both sides are overridden by the extension;
        # surface that as a developer warning.
        overridden = set(registry) & set(items)
        for key in sorted(overridden):
            msg = "extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config='warn-config')

        registry.update(items)
30
30
class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, match name using regular expression.
    """

    def __init__(self, section, name, default=None, alias=(),
                 generic=False, priority=0):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        # Generic items carry a compiled pattern (their name is a regex);
        # concrete items keep ``_re`` as None.
        self._re = re.compile(name) if generic else None
52
52
class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        # generic (regex-named) items kept aside for pattern lookup
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        exact = super(itemregister, self).get(key)
        # A concrete (non-generic) entry always wins over patterns.
        if exact is not None and not exact.generic:
            return exact

        # Fall back to the generic definitions in (priority, name) order.
        #
        # we use 'match' instead of 'search' to make the matching simpler
        # for people unfamiliar with regular expression. Having the match
        # rooted to the start of the string will produce less surprising
        # result for user writing simple regex for sub-attribute.
        #
        # For example using "color\..*" match produces an unsurprising
        # result, while using search could suddenly match apparently
        # unrelated configuration that happens to contains "color."
        # anywhere. This is a tradeoff where we favor requiring ".*" on
        # some match to avoid the need to prefix most pattern with "^".
        # The "^" seems more error prone.
        for candidate in sorted(self._generics,
                                key=lambda g: (g.priority, g.name)):
            if candidate._re.match(key):
                return candidate

        return None
92
92
# registry of Mercurial's own (core) config items, keyed by section name
coreitems = {}
94
94
def _register(configtable, *args, **kwargs):
    """Build a configitem from the arguments and record it in ``configtable``.

    Registering the same section/name pair twice is a programming error.
    """
    newitem = configitem(*args, **kwargs)
    register = configtable.setdefault(newitem.section, itemregister())
    if newitem.name in register:
        msg = "duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (newitem.section, newitem.name))
    register[newitem.name] = newitem
102
102
# special value for case where the default is derived from other values;
# it is a unique sentinel, always compared with ``is``
dynamicdefault = object()
105
105
106 # Registering actual config items
106 # Registering actual config items
107
107
def getitemregister(configtable):
    """Return a registration function bound to ``configtable``.

    The returned callable behaves like ``_register`` with the table
    pre-filled, and exposes ``dynamicdefault`` as an attribute.
    """
    register = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    register.dynamicdefault = dynamicdefault
    return register
113
113
# registration entry point for Mercurial's own (core) config items
coreconfigitem = getitemregister(coreitems)
115
115
def _registerdiffopts(section, configprefix=''):
    """Register the standard diff-option items under ``section``.

    ``configprefix`` is prepended to every option name, allowing the same
    option family to live under e.g. 'commands.commit.interactive.*'.
    """
    # (name, default) pairs, registered in this exact order
    for name, default in [
        ('nodates', False),
        ('showfunc', False),
        ('unified', None),
        ('git', False),
        ('ignorews', False),
        ('ignorewsamount', False),
        ('ignoreblanklines', False),
        ('ignorewseol', False),
        ('nobinary', False),
        ('noprefix', False),
        ('word-diff', False),
    ]:
        coreconfigitem(section, configprefix + name, default=default)
150
150
# Registration of the core configuration items, one compact call per item.
coreconfigitem('alias', '.*', default=dynamicdefault, generic=True)
coreconfigitem('auth', 'cookiefile', default=None)
_registerdiffopts(section='annotate')
# bookmarks.pushing: internal hack for discovery
coreconfigitem('bookmarks', 'pushing', default=list)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem('bundle', 'mainreporoot', default='')
coreconfigitem('censor', 'policy', default='abort')
coreconfigitem('chgserver', 'idletimeout', default=3600)
coreconfigitem('chgserver', 'skiphash', default=False)
coreconfigitem('cmdserver', 'log', default=None)
coreconfigitem('cmdserver', 'max-log-files', default=7)
coreconfigitem('cmdserver', 'max-log-size', default='1 MB')
coreconfigitem('cmdserver', 'max-repo-cache', default=0)
coreconfigitem('cmdserver', 'message-encodings', default=list)
coreconfigitem('cmdserver', 'track-log',
               default=lambda: ['chgserver', 'cmdserver', 'repocache'])
coreconfigitem('color', '.*', default=None, generic=True)
coreconfigitem('color', 'mode', default='auto')
coreconfigitem('color', 'pagermode', default=dynamicdefault)
_registerdiffopts(section='commands', configprefix='commit.interactive.')
coreconfigitem('commands', 'grep.all-files', default=False)
coreconfigitem('commands', 'resolve.confirm', default=False)
coreconfigitem('commands', 'resolve.explicit-re-merge', default=False)
coreconfigitem('commands', 'resolve.mark-check', default='none')
_registerdiffopts(section='commands', configprefix='revert.interactive.')
coreconfigitem('commands', 'show.aliasprefix', default=list)
coreconfigitem('commands', 'status.relative', default=False)
coreconfigitem('commands', 'status.skipstates', default=[])
coreconfigitem('commands', 'status.terse', default='')
coreconfigitem('commands', 'status.verbose', default=False)
coreconfigitem('commands', 'update.check', default=None)
coreconfigitem('commands', 'update.requiredest', default=False)
coreconfigitem('committemplate', '.*', default=None, generic=True)
coreconfigitem('convert', 'bzr.saverev', default=True)
coreconfigitem('convert', 'cvsps.cache', default=True)
coreconfigitem('convert', 'cvsps.fuzz', default=60)
coreconfigitem('convert', 'cvsps.logencoding', default=None)
coreconfigitem('convert', 'cvsps.mergefrom', default=None)
coreconfigitem('convert', 'cvsps.mergeto', default=None)
coreconfigitem('convert', 'git.committeractions',
               default=lambda: ['messagedifferent'])
coreconfigitem('convert', 'git.extrakeys', default=list)
coreconfigitem('convert', 'git.findcopiesharder', default=False)
coreconfigitem('convert', 'git.remoteprefix', default='remote')
coreconfigitem('convert', 'git.renamelimit', default=400)
coreconfigitem('convert', 'git.saverev', default=True)
coreconfigitem('convert', 'git.similarity', default=50)
coreconfigitem('convert', 'git.skipsubmodules', default=False)
coreconfigitem('convert', 'hg.clonebranches', default=False)
coreconfigitem('convert', 'hg.ignoreerrors', default=False)
coreconfigitem('convert', 'hg.revs', default=None)
coreconfigitem('convert', 'hg.saverev', default=False)
coreconfigitem('convert', 'hg.sourcename', default=None)
coreconfigitem('convert', 'hg.startrev', default=None)
coreconfigitem('convert', 'hg.tagsbranch', default='default')
coreconfigitem('convert', 'hg.usebranchnames', default=True)
coreconfigitem('convert', 'ignoreancestorcheck', default=False)
coreconfigitem('convert', 'localtimezone', default=False)
coreconfigitem('convert', 'p4.encoding', default=dynamicdefault)
coreconfigitem('convert', 'p4.startrev', default=0)
coreconfigitem('convert', 'skiptags', default=False)
coreconfigitem('convert', 'svn.debugsvnlog', default=True)
coreconfigitem('convert', 'svn.trunk', default=None)
coreconfigitem('convert', 'svn.tags', default=None)
coreconfigitem('convert', 'svn.branches', default=None)
coreconfigitem('convert', 'svn.startrev', default=0)
coreconfigitem('debug', 'dirstate.delaywrite', default=0)
coreconfigitem('defaults', '.*', default=None, generic=True)
coreconfigitem('devel', 'all-warnings', default=False)
coreconfigitem('devel', 'bundle2.debug', default=False)
coreconfigitem('devel', 'bundle.delta', default='')
coreconfigitem('devel', 'cache-vfs', default=None)
coreconfigitem('devel', 'check-locks', default=False)
coreconfigitem('devel', 'check-relroot', default=False)
coreconfigitem('devel', 'default-date', default=None)
coreconfigitem('devel', 'deprec-warn', default=False)
coreconfigitem('devel', 'disableloaddefaultcerts', default=False)
coreconfigitem('devel', 'warn-empty-changegroup', default=False)
coreconfigitem('devel', 'legacy.exchange', default=list)
coreconfigitem('devel', 'servercafile', default='')
coreconfigitem('devel', 'serverexactprotocol', default='')
coreconfigitem('devel', 'serverrequirecert', default=False)
coreconfigitem('devel', 'strip-obsmarkers', default=True)
coreconfigitem('devel', 'warn-config', default=None)
coreconfigitem('devel', 'warn-config-default', default=None)
coreconfigitem('devel', 'user.obsmarker', default=None)
coreconfigitem('devel', 'warn-config-unknown', default=None)
coreconfigitem('devel', 'debug.copies', default=False)
coreconfigitem('devel', 'debug.extensions', default=False)
coreconfigitem('devel', 'debug.peer-request', default=False)
_registerdiffopts(section='diff')
coreconfigitem('email', 'bcc', default=None)
coreconfigitem('email', 'cc', default=None)
coreconfigitem('email', 'charsets', default=list)
coreconfigitem('email', 'from', default=None)
coreconfigitem('email', 'method', default='smtp')
coreconfigitem('email', 'reply-to', default=None)
coreconfigitem('email', 'to', default=None)
coreconfigitem('experimental', 'archivemetatemplate', default=dynamicdefault)
coreconfigitem('experimental', 'auto-publish', default='publish')
coreconfigitem('experimental', 'bundle-phases', default=False)
coreconfigitem('experimental', 'bundle2-advertise', default=True)
coreconfigitem('experimental', 'bundle2-output-capture', default=False)
coreconfigitem('experimental', 'bundle2.pushback', default=False)
coreconfigitem('experimental', 'bundle2lazylocking', default=False)
coreconfigitem('experimental', 'bundlecomplevel', default=None)
coreconfigitem('experimental', 'bundlecomplevel.bzip2', default=None)
coreconfigitem('experimental', 'bundlecomplevel.gzip', default=None)
coreconfigitem('experimental', 'bundlecomplevel.none', default=None)
coreconfigitem('experimental', 'bundlecomplevel.zstd', default=None)
coreconfigitem('experimental', 'changegroup3', default=False)
coreconfigitem('experimental', 'cleanup-as-archived', default=False)
coreconfigitem('experimental', 'clientcompressionengines', default=list)
coreconfigitem('experimental', 'copytrace', default='on')
coreconfigitem('experimental', 'copytrace.movecandidateslimit', default=100)
coreconfigitem('experimental', 'copytrace.sourcecommitlimit', default=100)
coreconfigitem('experimental', 'copies.read-from', default="filelog-only")
coreconfigitem('experimental', 'crecordtest', default=None)
coreconfigitem('experimental', 'directaccess', default=False)
coreconfigitem('experimental', 'directaccess.revnums', default=False)
coreconfigitem('experimental', 'editortmpinhg', default=False)
coreconfigitem('experimental', 'evolution', default=list)
coreconfigitem('experimental', 'evolution.allowdivergence', default=False,
               alias=[('experimental', 'allowdivergence')])
coreconfigitem('experimental', 'evolution.allowunstable', default=None)
coreconfigitem('experimental', 'evolution.createmarkers', default=None)
coreconfigitem('experimental', 'evolution.effect-flags', default=True,
               alias=[('experimental', 'effect-flags')])
coreconfigitem('experimental', 'evolution.exchange', default=None)
coreconfigitem('experimental', 'evolution.bundle-obsmarker', default=False)
coreconfigitem('experimental', 'evolution.report-instabilities', default=True)
coreconfigitem('experimental', 'evolution.track-operation', default=True)
coreconfigitem('experimental', 'maxdeltachainspan', default=-1)
coreconfigitem('experimental', 'mergetempdirprefix', default=None)
coreconfigitem('experimental', 'mmapindexthreshold', default=None)
coreconfigitem('experimental', 'narrow', default=False)
coreconfigitem('experimental', 'nonnormalparanoidcheck', default=False)
coreconfigitem('experimental', 'exportableenviron', default=list)
coreconfigitem('experimental', 'extendedheader.index', default=None)
coreconfigitem('experimental', 'extendedheader.similarity', default=False)
coreconfigitem('experimental', 'format.compression', default='zlib')
coreconfigitem('experimental', 'graphshorten', default=False)
coreconfigitem('experimental', 'graphstyle.parent', default=dynamicdefault)
coreconfigitem('experimental', 'graphstyle.missing', default=dynamicdefault)
coreconfigitem('experimental', 'graphstyle.grandparent',
               default=dynamicdefault)
coreconfigitem('experimental', 'hook-track-tags', default=False)
coreconfigitem('experimental', 'httppeer.advertise-v2', default=False)
coreconfigitem('experimental', 'httppeer.v2-encoder-order', default=None)
coreconfigitem('experimental', 'httppostargs', default=False)
coreconfigitem('experimental', 'mergedriver', default=None)
coreconfigitem('experimental', 'nointerrupt', default=False)
coreconfigitem('experimental', 'nointerrupt-interactiveonly', default=True)

coreconfigitem('experimental', 'obsmarkers-exchange-debug', default=False)
coreconfigitem('experimental', 'remotenames', default=False)
coreconfigitem('experimental', 'removeemptydirs', default=True)
coreconfigitem('experimental', 'revert.interactive.select-to-keep',
               default=False)
coreconfigitem('experimental', 'revisions.prefixhexnode', default=False)
coreconfigitem('experimental', 'revlogv2', default=None)
coreconfigitem('experimental', 'revisions.disambiguatewithin', default=None)
coreconfigitem('experimental', 'server.filesdata.recommended-batch-size',
               default=50000)
coreconfigitem('experimental', 'server.manifestdata.recommended-batch-size',
               default=100000)
coreconfigitem('experimental', 'server.stream-narrow-clones', default=False)
619 coreconfigitem('experimental', 'single-head-per-branch',
619 coreconfigitem('experimental', 'single-head-per-branch',
620 default=False,
620 default=False,
621 )
621 )
622 coreconfigitem('experimental', 'sshserver.support-v2',
622 coreconfigitem('experimental', 'sshserver.support-v2',
623 default=False,
623 default=False,
624 )
624 )
625 coreconfigitem('experimental', 'sparse-read',
625 coreconfigitem('experimental', 'sparse-read',
626 default=False,
626 default=False,
627 )
627 )
628 coreconfigitem('experimental', 'sparse-read.density-threshold',
628 coreconfigitem('experimental', 'sparse-read.density-threshold',
629 default=0.50,
629 default=0.50,
630 )
630 )
631 coreconfigitem('experimental', 'sparse-read.min-gap-size',
631 coreconfigitem('experimental', 'sparse-read.min-gap-size',
632 default='65K',
632 default='65K',
633 )
633 )
634 coreconfigitem('experimental', 'treemanifest',
634 coreconfigitem('experimental', 'treemanifest',
635 default=False,
635 default=False,
636 )
636 )
637 coreconfigitem('experimental', 'update.atomic-file',
637 coreconfigitem('experimental', 'update.atomic-file',
638 default=False,
638 default=False,
639 )
639 )
640 coreconfigitem('experimental', 'sshpeer.advertise-v2',
640 coreconfigitem('experimental', 'sshpeer.advertise-v2',
641 default=False,
641 default=False,
642 )
642 )
643 coreconfigitem('experimental', 'web.apiserver',
643 coreconfigitem('experimental', 'web.apiserver',
644 default=False,
644 default=False,
645 )
645 )
646 coreconfigitem('experimental', 'web.api.http-v2',
646 coreconfigitem('experimental', 'web.api.http-v2',
647 default=False,
647 default=False,
648 )
648 )
649 coreconfigitem('experimental', 'web.api.debugreflect',
649 coreconfigitem('experimental', 'web.api.debugreflect',
650 default=False,
650 default=False,
651 )
651 )
652 coreconfigitem('experimental', 'worker.wdir-get-thread-safe',
652 coreconfigitem('experimental', 'worker.wdir-get-thread-safe',
653 default=False,
653 default=False,
654 )
654 )
655 coreconfigitem('experimental', 'xdiff',
655 coreconfigitem('experimental', 'xdiff',
656 default=False,
656 default=False,
657 )
657 )
658 coreconfigitem('extensions', '.*',
658 coreconfigitem('extensions', '.*',
659 default=None,
659 default=None,
660 generic=True,
660 generic=True,
661 )
661 )
662 coreconfigitem('extdata', '.*',
662 coreconfigitem('extdata', '.*',
663 default=None,
663 default=None,
664 generic=True,
664 generic=True,
665 )
665 )
666 coreconfigitem('format', 'chunkcachesize',
666 coreconfigitem('format', 'chunkcachesize',
667 default=None,
667 default=None,
668 )
668 )
669 coreconfigitem('format', 'dotencode',
669 coreconfigitem('format', 'dotencode',
670 default=True,
670 default=True,
671 )
671 )
672 coreconfigitem('format', 'generaldelta',
672 coreconfigitem('format', 'generaldelta',
673 default=False,
673 default=False,
674 )
674 )
675 coreconfigitem('format', 'manifestcachesize',
675 coreconfigitem('format', 'manifestcachesize',
676 default=None,
676 default=None,
677 )
677 )
678 coreconfigitem('format', 'maxchainlen',
678 coreconfigitem('format', 'maxchainlen',
679 default=dynamicdefault,
679 default=dynamicdefault,
680 )
680 )
681 coreconfigitem('format', 'obsstore-version',
681 coreconfigitem('format', 'obsstore-version',
682 default=None,
682 default=None,
683 )
683 )
684 coreconfigitem('format', 'sparse-revlog',
684 coreconfigitem('format', 'sparse-revlog',
685 default=True,
685 default=True,
686 )
686 )
687 coreconfigitem('format', 'usefncache',
687 coreconfigitem('format', 'usefncache',
688 default=True,
688 default=True,
689 )
689 )
690 coreconfigitem('format', 'usegeneraldelta',
690 coreconfigitem('format', 'usegeneraldelta',
691 default=True,
691 default=True,
692 )
692 )
693 coreconfigitem('format', 'usestore',
693 coreconfigitem('format', 'usestore',
694 default=True,
694 default=True,
695 )
695 )
696 coreconfigitem('format', 'internal-phase',
696 coreconfigitem('format', 'internal-phase',
697 default=False,
697 default=False,
698 )
698 )
699 coreconfigitem('fsmonitor', 'warn_when_unused',
699 coreconfigitem('fsmonitor', 'warn_when_unused',
700 default=True,
700 default=True,
701 )
701 )
702 coreconfigitem('fsmonitor', 'warn_update_file_count',
702 coreconfigitem('fsmonitor', 'warn_update_file_count',
703 default=50000,
703 default=50000,
704 )
704 )
705 coreconfigitem('help', br'hidden-command\..*',
705 coreconfigitem('help', br'hidden-command\..*',
706 default=False,
706 default=False,
707 generic=True,
707 generic=True,
708 )
708 )
709 coreconfigitem('help', br'hidden-topic\..*',
709 coreconfigitem('help', br'hidden-topic\..*',
710 default=False,
710 default=False,
711 generic=True,
711 generic=True,
712 )
712 )
713 coreconfigitem('hooks', '.*',
713 coreconfigitem('hooks', '.*',
714 default=dynamicdefault,
714 default=dynamicdefault,
715 generic=True,
715 generic=True,
716 )
716 )
717 coreconfigitem('hgweb-paths', '.*',
717 coreconfigitem('hgweb-paths', '.*',
718 default=list,
718 default=list,
719 generic=True,
719 generic=True,
720 )
720 )
721 coreconfigitem('hostfingerprints', '.*',
721 coreconfigitem('hostfingerprints', '.*',
722 default=list,
722 default=list,
723 generic=True,
723 generic=True,
724 )
724 )
725 coreconfigitem('hostsecurity', 'ciphers',
725 coreconfigitem('hostsecurity', 'ciphers',
726 default=None,
726 default=None,
727 )
727 )
728 coreconfigitem('hostsecurity', 'disabletls10warning',
728 coreconfigitem('hostsecurity', 'disabletls10warning',
729 default=False,
729 default=False,
730 )
730 )
731 coreconfigitem('hostsecurity', 'minimumprotocol',
731 coreconfigitem('hostsecurity', 'minimumprotocol',
732 default=dynamicdefault,
732 default=dynamicdefault,
733 )
733 )
734 coreconfigitem('hostsecurity', '.*:minimumprotocol$',
734 coreconfigitem('hostsecurity', '.*:minimumprotocol$',
735 default=dynamicdefault,
735 default=dynamicdefault,
736 generic=True,
736 generic=True,
737 )
737 )
738 coreconfigitem('hostsecurity', '.*:ciphers$',
738 coreconfigitem('hostsecurity', '.*:ciphers$',
739 default=dynamicdefault,
739 default=dynamicdefault,
740 generic=True,
740 generic=True,
741 )
741 )
742 coreconfigitem('hostsecurity', '.*:fingerprints$',
742 coreconfigitem('hostsecurity', '.*:fingerprints$',
743 default=list,
743 default=list,
744 generic=True,
744 generic=True,
745 )
745 )
746 coreconfigitem('hostsecurity', '.*:verifycertsfile$',
746 coreconfigitem('hostsecurity', '.*:verifycertsfile$',
747 default=None,
747 default=None,
748 generic=True,
748 generic=True,
749 )
749 )
750
750
751 coreconfigitem('http_proxy', 'always',
751 coreconfigitem('http_proxy', 'always',
752 default=False,
752 default=False,
753 )
753 )
754 coreconfigitem('http_proxy', 'host',
754 coreconfigitem('http_proxy', 'host',
755 default=None,
755 default=None,
756 )
756 )
757 coreconfigitem('http_proxy', 'no',
757 coreconfigitem('http_proxy', 'no',
758 default=list,
758 default=list,
759 )
759 )
760 coreconfigitem('http_proxy', 'passwd',
760 coreconfigitem('http_proxy', 'passwd',
761 default=None,
761 default=None,
762 )
762 )
763 coreconfigitem('http_proxy', 'user',
763 coreconfigitem('http_proxy', 'user',
764 default=None,
764 default=None,
765 )
765 )
766
766
767 coreconfigitem('http', 'timeout',
767 coreconfigitem('http', 'timeout',
768 default=None,
768 default=None,
769 )
769 )
770
770
771 coreconfigitem('logtoprocess', 'commandexception',
771 coreconfigitem('logtoprocess', 'commandexception',
772 default=None,
772 default=None,
773 )
773 )
774 coreconfigitem('logtoprocess', 'commandfinish',
774 coreconfigitem('logtoprocess', 'commandfinish',
775 default=None,
775 default=None,
776 )
776 )
777 coreconfigitem('logtoprocess', 'command',
777 coreconfigitem('logtoprocess', 'command',
778 default=None,
778 default=None,
779 )
779 )
780 coreconfigitem('logtoprocess', 'develwarn',
780 coreconfigitem('logtoprocess', 'develwarn',
781 default=None,
781 default=None,
782 )
782 )
783 coreconfigitem('logtoprocess', 'uiblocked',
783 coreconfigitem('logtoprocess', 'uiblocked',
784 default=None,
784 default=None,
785 )
785 )
786 coreconfigitem('merge', 'checkunknown',
786 coreconfigitem('merge', 'checkunknown',
787 default='abort',
787 default='abort',
788 )
788 )
789 coreconfigitem('merge', 'checkignored',
789 coreconfigitem('merge', 'checkignored',
790 default='abort',
790 default='abort',
791 )
791 )
792 coreconfigitem('experimental', 'merge.checkpathconflicts',
792 coreconfigitem('experimental', 'merge.checkpathconflicts',
793 default=False,
793 default=False,
794 )
794 )
795 coreconfigitem('merge', 'followcopies',
795 coreconfigitem('merge', 'followcopies',
796 default=True,
796 default=True,
797 )
797 )
798 coreconfigitem('merge', 'on-failure',
798 coreconfigitem('merge', 'on-failure',
799 default='continue',
799 default='continue',
800 )
800 )
801 coreconfigitem('merge', 'preferancestor',
801 coreconfigitem('merge', 'preferancestor',
802 default=lambda: ['*'],
802 default=lambda: ['*'],
803 )
803 )
804 coreconfigitem('merge', 'strict-capability-check',
804 coreconfigitem('merge', 'strict-capability-check',
805 default=False,
805 default=False,
806 )
806 )
807 coreconfigitem('merge-tools', '.*',
807 coreconfigitem('merge-tools', '.*',
808 default=None,
808 default=None,
809 generic=True,
809 generic=True,
810 )
810 )
811 coreconfigitem('merge-tools', br'.*\.args$',
811 coreconfigitem('merge-tools', br'.*\.args$',
812 default="$local $base $other",
812 default="$local $base $other",
813 generic=True,
813 generic=True,
814 priority=-1,
814 priority=-1,
815 )
815 )
816 coreconfigitem('merge-tools', br'.*\.binary$',
816 coreconfigitem('merge-tools', br'.*\.binary$',
817 default=False,
817 default=False,
818 generic=True,
818 generic=True,
819 priority=-1,
819 priority=-1,
820 )
820 )
821 coreconfigitem('merge-tools', br'.*\.check$',
821 coreconfigitem('merge-tools', br'.*\.check$',
822 default=list,
822 default=list,
823 generic=True,
823 generic=True,
824 priority=-1,
824 priority=-1,
825 )
825 )
826 coreconfigitem('merge-tools', br'.*\.checkchanged$',
826 coreconfigitem('merge-tools', br'.*\.checkchanged$',
827 default=False,
827 default=False,
828 generic=True,
828 generic=True,
829 priority=-1,
829 priority=-1,
830 )
830 )
831 coreconfigitem('merge-tools', br'.*\.executable$',
831 coreconfigitem('merge-tools', br'.*\.executable$',
832 default=dynamicdefault,
832 default=dynamicdefault,
833 generic=True,
833 generic=True,
834 priority=-1,
834 priority=-1,
835 )
835 )
836 coreconfigitem('merge-tools', br'.*\.fixeol$',
836 coreconfigitem('merge-tools', br'.*\.fixeol$',
837 default=False,
837 default=False,
838 generic=True,
838 generic=True,
839 priority=-1,
839 priority=-1,
840 )
840 )
841 coreconfigitem('merge-tools', br'.*\.gui$',
841 coreconfigitem('merge-tools', br'.*\.gui$',
842 default=False,
842 default=False,
843 generic=True,
843 generic=True,
844 priority=-1,
844 priority=-1,
845 )
845 )
846 coreconfigitem('merge-tools', br'.*\.mergemarkers$',
846 coreconfigitem('merge-tools', br'.*\.mergemarkers$',
847 default='basic',
847 default='basic',
848 generic=True,
848 generic=True,
849 priority=-1,
849 priority=-1,
850 )
850 )
851 coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
851 coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
852 default=dynamicdefault, # take from ui.mergemarkertemplate
852 default=dynamicdefault, # take from ui.mergemarkertemplate
853 generic=True,
853 generic=True,
854 priority=-1,
854 priority=-1,
855 )
855 )
856 coreconfigitem('merge-tools', br'.*\.priority$',
856 coreconfigitem('merge-tools', br'.*\.priority$',
857 default=0,
857 default=0,
858 generic=True,
858 generic=True,
859 priority=-1,
859 priority=-1,
860 )
860 )
861 coreconfigitem('merge-tools', br'.*\.premerge$',
861 coreconfigitem('merge-tools', br'.*\.premerge$',
862 default=dynamicdefault,
862 default=dynamicdefault,
863 generic=True,
863 generic=True,
864 priority=-1,
864 priority=-1,
865 )
865 )
866 coreconfigitem('merge-tools', br'.*\.symlink$',
866 coreconfigitem('merge-tools', br'.*\.symlink$',
867 default=False,
867 default=False,
868 generic=True,
868 generic=True,
869 priority=-1,
869 priority=-1,
870 )
870 )
871 coreconfigitem('pager', 'attend-.*',
871 coreconfigitem('pager', 'attend-.*',
872 default=dynamicdefault,
872 default=dynamicdefault,
873 generic=True,
873 generic=True,
874 )
874 )
875 coreconfigitem('pager', 'ignore',
875 coreconfigitem('pager', 'ignore',
876 default=list,
876 default=list,
877 )
877 )
878 coreconfigitem('pager', 'pager',
878 coreconfigitem('pager', 'pager',
879 default=dynamicdefault,
879 default=dynamicdefault,
880 )
880 )
881 coreconfigitem('patch', 'eol',
881 coreconfigitem('patch', 'eol',
882 default='strict',
882 default='strict',
883 )
883 )
884 coreconfigitem('patch', 'fuzz',
884 coreconfigitem('patch', 'fuzz',
885 default=2,
885 default=2,
886 )
886 )
887 coreconfigitem('paths', 'default',
887 coreconfigitem('paths', 'default',
888 default=None,
888 default=None,
889 )
889 )
890 coreconfigitem('paths', 'default-push',
890 coreconfigitem('paths', 'default-push',
891 default=None,
891 default=None,
892 )
892 )
893 coreconfigitem('paths', '.*',
893 coreconfigitem('paths', '.*',
894 default=None,
894 default=None,
895 generic=True,
895 generic=True,
896 )
896 )
897 coreconfigitem('phases', 'checksubrepos',
897 coreconfigitem('phases', 'checksubrepos',
898 default='follow',
898 default='follow',
899 )
899 )
900 coreconfigitem('phases', 'new-commit',
900 coreconfigitem('phases', 'new-commit',
901 default='draft',
901 default='draft',
902 )
902 )
903 coreconfigitem('phases', 'publish',
903 coreconfigitem('phases', 'publish',
904 default=True,
904 default=True,
905 )
905 )
906 coreconfigitem('profiling', 'enabled',
906 coreconfigitem('profiling', 'enabled',
907 default=False,
907 default=False,
908 )
908 )
909 coreconfigitem('profiling', 'format',
909 coreconfigitem('profiling', 'format',
910 default='text',
910 default='text',
911 )
911 )
912 coreconfigitem('profiling', 'freq',
912 coreconfigitem('profiling', 'freq',
913 default=1000,
913 default=1000,
914 )
914 )
915 coreconfigitem('profiling', 'limit',
915 coreconfigitem('profiling', 'limit',
916 default=30,
916 default=30,
917 )
917 )
918 coreconfigitem('profiling', 'nested',
918 coreconfigitem('profiling', 'nested',
919 default=0,
919 default=0,
920 )
920 )
921 coreconfigitem('profiling', 'output',
921 coreconfigitem('profiling', 'output',
922 default=None,
922 default=None,
923 )
923 )
924 coreconfigitem('profiling', 'showmax',
924 coreconfigitem('profiling', 'showmax',
925 default=0.999,
925 default=0.999,
926 )
926 )
927 coreconfigitem('profiling', 'showmin',
927 coreconfigitem('profiling', 'showmin',
928 default=dynamicdefault,
928 default=dynamicdefault,
929 )
929 )
930 coreconfigitem('profiling', 'sort',
930 coreconfigitem('profiling', 'sort',
931 default='inlinetime',
931 default='inlinetime',
932 )
932 )
933 coreconfigitem('profiling', 'statformat',
933 coreconfigitem('profiling', 'statformat',
934 default='hotpath',
934 default='hotpath',
935 )
935 )
936 coreconfigitem('profiling', 'time-track',
936 coreconfigitem('profiling', 'time-track',
937 default=dynamicdefault,
937 default=dynamicdefault,
938 )
938 )
939 coreconfigitem('profiling', 'type',
939 coreconfigitem('profiling', 'type',
940 default='stat',
940 default='stat',
941 )
941 )
942 coreconfigitem('progress', 'assume-tty',
942 coreconfigitem('progress', 'assume-tty',
943 default=False,
943 default=False,
944 )
944 )
945 coreconfigitem('progress', 'changedelay',
945 coreconfigitem('progress', 'changedelay',
946 default=1,
946 default=1,
947 )
947 )
948 coreconfigitem('progress', 'clear-complete',
948 coreconfigitem('progress', 'clear-complete',
949 default=True,
949 default=True,
950 )
950 )
951 coreconfigitem('progress', 'debug',
951 coreconfigitem('progress', 'debug',
952 default=False,
952 default=False,
953 )
953 )
954 coreconfigitem('progress', 'delay',
954 coreconfigitem('progress', 'delay',
955 default=3,
955 default=3,
956 )
956 )
957 coreconfigitem('progress', 'disable',
957 coreconfigitem('progress', 'disable',
958 default=False,
958 default=False,
959 )
959 )
960 coreconfigitem('progress', 'estimateinterval',
960 coreconfigitem('progress', 'estimateinterval',
961 default=60.0,
961 default=60.0,
962 )
962 )
963 coreconfigitem('progress', 'format',
963 coreconfigitem('progress', 'format',
964 default=lambda: ['topic', 'bar', 'number', 'estimate'],
964 default=lambda: ['topic', 'bar', 'number', 'estimate'],
965 )
965 )
966 coreconfigitem('progress', 'refresh',
966 coreconfigitem('progress', 'refresh',
967 default=0.1,
967 default=0.1,
968 )
968 )
969 coreconfigitem('progress', 'width',
969 coreconfigitem('progress', 'width',
970 default=dynamicdefault,
970 default=dynamicdefault,
971 )
971 )
972 coreconfigitem('push', 'pushvars.server',
972 coreconfigitem('push', 'pushvars.server',
973 default=False,
973 default=False,
974 )
974 )
975 coreconfigitem('rewrite', 'backup-bundle',
975 coreconfigitem('rewrite', 'backup-bundle',
976 default=True,
976 default=True,
977 alias=[('ui', 'history-editing-backup')],
977 alias=[('ui', 'history-editing-backup')],
978 )
978 )
979 coreconfigitem('rewrite', 'update-timestamp',
979 coreconfigitem('rewrite', 'update-timestamp',
980 default=False,
980 default=False,
981 )
981 )
982 coreconfigitem('storage', 'new-repo-backend',
982 coreconfigitem('storage', 'new-repo-backend',
983 default='revlogv1',
983 default='revlogv1',
984 )
984 )
985 coreconfigitem('storage', 'revlog.optimize-delta-parent-choice',
985 coreconfigitem('storage', 'revlog.optimize-delta-parent-choice',
986 default=True,
986 default=True,
987 alias=[('format', 'aggressivemergedeltas')],
987 alias=[('format', 'aggressivemergedeltas')],
988 )
988 )
989 coreconfigitem('storage', 'revlog.reuse-external-delta',
989 coreconfigitem('storage', 'revlog.reuse-external-delta',
990 default=True,
990 default=True,
991 )
991 )
992 coreconfigitem('storage', 'revlog.reuse-external-delta-parent',
992 coreconfigitem('storage', 'revlog.reuse-external-delta-parent',
993 default=None,
993 default=None,
994 )
994 )
995 coreconfigitem('storage', 'revlog.zlib.level',
996 default=None,
997 )
995 coreconfigitem('server', 'bookmarks-pushkey-compat',
998 coreconfigitem('server', 'bookmarks-pushkey-compat',
996 default=True,
999 default=True,
997 )
1000 )
998 coreconfigitem('server', 'bundle1',
1001 coreconfigitem('server', 'bundle1',
999 default=True,
1002 default=True,
1000 )
1003 )
1001 coreconfigitem('server', 'bundle1gd',
1004 coreconfigitem('server', 'bundle1gd',
1002 default=None,
1005 default=None,
1003 )
1006 )
1004 coreconfigitem('server', 'bundle1.pull',
1007 coreconfigitem('server', 'bundle1.pull',
1005 default=None,
1008 default=None,
1006 )
1009 )
1007 coreconfigitem('server', 'bundle1gd.pull',
1010 coreconfigitem('server', 'bundle1gd.pull',
1008 default=None,
1011 default=None,
1009 )
1012 )
1010 coreconfigitem('server', 'bundle1.push',
1013 coreconfigitem('server', 'bundle1.push',
1011 default=None,
1014 default=None,
1012 )
1015 )
1013 coreconfigitem('server', 'bundle1gd.push',
1016 coreconfigitem('server', 'bundle1gd.push',
1014 default=None,
1017 default=None,
1015 )
1018 )
1016 coreconfigitem('server', 'bundle2.stream',
1019 coreconfigitem('server', 'bundle2.stream',
1017 default=True,
1020 default=True,
1018 alias=[('experimental', 'bundle2.stream')]
1021 alias=[('experimental', 'bundle2.stream')]
1019 )
1022 )
1020 coreconfigitem('server', 'compressionengines',
1023 coreconfigitem('server', 'compressionengines',
1021 default=list,
1024 default=list,
1022 )
1025 )
1023 coreconfigitem('server', 'concurrent-push-mode',
1026 coreconfigitem('server', 'concurrent-push-mode',
1024 default='strict',
1027 default='strict',
1025 )
1028 )
1026 coreconfigitem('server', 'disablefullbundle',
1029 coreconfigitem('server', 'disablefullbundle',
1027 default=False,
1030 default=False,
1028 )
1031 )
1029 coreconfigitem('server', 'maxhttpheaderlen',
1032 coreconfigitem('server', 'maxhttpheaderlen',
1030 default=1024,
1033 default=1024,
1031 )
1034 )
1032 coreconfigitem('server', 'pullbundle',
1035 coreconfigitem('server', 'pullbundle',
1033 default=False,
1036 default=False,
1034 )
1037 )
1035 coreconfigitem('server', 'preferuncompressed',
1038 coreconfigitem('server', 'preferuncompressed',
1036 default=False,
1039 default=False,
1037 )
1040 )
1038 coreconfigitem('server', 'streamunbundle',
1041 coreconfigitem('server', 'streamunbundle',
1039 default=False,
1042 default=False,
1040 )
1043 )
1041 coreconfigitem('server', 'uncompressed',
1044 coreconfigitem('server', 'uncompressed',
1042 default=True,
1045 default=True,
1043 )
1046 )
1044 coreconfigitem('server', 'uncompressedallowsecret',
1047 coreconfigitem('server', 'uncompressedallowsecret',
1045 default=False,
1048 default=False,
1046 )
1049 )
1047 coreconfigitem('server', 'view',
1050 coreconfigitem('server', 'view',
1048 default='served',
1051 default='served',
1049 )
1052 )
1050 coreconfigitem('server', 'validate',
1053 coreconfigitem('server', 'validate',
1051 default=False,
1054 default=False,
1052 )
1055 )
1053 coreconfigitem('server', 'zliblevel',
1056 coreconfigitem('server', 'zliblevel',
1054 default=-1,
1057 default=-1,
1055 )
1058 )
1056 coreconfigitem('server', 'zstdlevel',
1059 coreconfigitem('server', 'zstdlevel',
1057 default=3,
1060 default=3,
1058 )
1061 )
1059 coreconfigitem('share', 'pool',
1062 coreconfigitem('share', 'pool',
1060 default=None,
1063 default=None,
1061 )
1064 )
1062 coreconfigitem('share', 'poolnaming',
1065 coreconfigitem('share', 'poolnaming',
1063 default='identity',
1066 default='identity',
1064 )
1067 )
1065 coreconfigitem('smtp', 'host',
1068 coreconfigitem('smtp', 'host',
1066 default=None,
1069 default=None,
1067 )
1070 )
1068 coreconfigitem('smtp', 'local_hostname',
1071 coreconfigitem('smtp', 'local_hostname',
1069 default=None,
1072 default=None,
1070 )
1073 )
1071 coreconfigitem('smtp', 'password',
1074 coreconfigitem('smtp', 'password',
1072 default=None,
1075 default=None,
1073 )
1076 )
1074 coreconfigitem('smtp', 'port',
1077 coreconfigitem('smtp', 'port',
1075 default=dynamicdefault,
1078 default=dynamicdefault,
1076 )
1079 )
1077 coreconfigitem('smtp', 'tls',
1080 coreconfigitem('smtp', 'tls',
1078 default='none',
1081 default='none',
1079 )
1082 )
1080 coreconfigitem('smtp', 'username',
1083 coreconfigitem('smtp', 'username',
1081 default=None,
1084 default=None,
1082 )
1085 )
1083 coreconfigitem('sparse', 'missingwarning',
1086 coreconfigitem('sparse', 'missingwarning',
1084 default=True,
1087 default=True,
1085 )
1088 )
1086 coreconfigitem('subrepos', 'allowed',
1089 coreconfigitem('subrepos', 'allowed',
1087 default=dynamicdefault, # to make backporting simpler
1090 default=dynamicdefault, # to make backporting simpler
1088 )
1091 )
1089 coreconfigitem('subrepos', 'hg:allowed',
1092 coreconfigitem('subrepos', 'hg:allowed',
1090 default=dynamicdefault,
1093 default=dynamicdefault,
1091 )
1094 )
1092 coreconfigitem('subrepos', 'git:allowed',
1095 coreconfigitem('subrepos', 'git:allowed',
1093 default=dynamicdefault,
1096 default=dynamicdefault,
1094 )
1097 )
1095 coreconfigitem('subrepos', 'svn:allowed',
1098 coreconfigitem('subrepos', 'svn:allowed',
1096 default=dynamicdefault,
1099 default=dynamicdefault,
1097 )
1100 )
1098 coreconfigitem('templates', '.*',
1101 coreconfigitem('templates', '.*',
1099 default=None,
1102 default=None,
1100 generic=True,
1103 generic=True,
1101 )
1104 )
1102 coreconfigitem('templateconfig', '.*',
1105 coreconfigitem('templateconfig', '.*',
1103 default=dynamicdefault,
1106 default=dynamicdefault,
1104 generic=True,
1107 generic=True,
1105 )
1108 )
1106 coreconfigitem('trusted', 'groups',
1109 coreconfigitem('trusted', 'groups',
1107 default=list,
1110 default=list,
1108 )
1111 )
1109 coreconfigitem('trusted', 'users',
1112 coreconfigitem('trusted', 'users',
1110 default=list,
1113 default=list,
1111 )
1114 )
1112 coreconfigitem('ui', '_usedassubrepo',
1115 coreconfigitem('ui', '_usedassubrepo',
1113 default=False,
1116 default=False,
1114 )
1117 )
1115 coreconfigitem('ui', 'allowemptycommit',
1118 coreconfigitem('ui', 'allowemptycommit',
1116 default=False,
1119 default=False,
1117 )
1120 )
1118 coreconfigitem('ui', 'archivemeta',
1121 coreconfigitem('ui', 'archivemeta',
1119 default=True,
1122 default=True,
1120 )
1123 )
1121 coreconfigitem('ui', 'askusername',
1124 coreconfigitem('ui', 'askusername',
1122 default=False,
1125 default=False,
1123 )
1126 )
1124 coreconfigitem('ui', 'clonebundlefallback',
1127 coreconfigitem('ui', 'clonebundlefallback',
1125 default=False,
1128 default=False,
1126 )
1129 )
1127 coreconfigitem('ui', 'clonebundleprefers',
1130 coreconfigitem('ui', 'clonebundleprefers',
1128 default=list,
1131 default=list,
1129 )
1132 )
1130 coreconfigitem('ui', 'clonebundles',
1133 coreconfigitem('ui', 'clonebundles',
1131 default=True,
1134 default=True,
1132 )
1135 )
1133 coreconfigitem('ui', 'color',
1136 coreconfigitem('ui', 'color',
1134 default='auto',
1137 default='auto',
1135 )
1138 )
1136 coreconfigitem('ui', 'commitsubrepos',
1139 coreconfigitem('ui', 'commitsubrepos',
1137 default=False,
1140 default=False,
1138 )
1141 )
1139 coreconfigitem('ui', 'debug',
1142 coreconfigitem('ui', 'debug',
1140 default=False,
1143 default=False,
1141 )
1144 )
1142 coreconfigitem('ui', 'debugger',
1145 coreconfigitem('ui', 'debugger',
1143 default=None,
1146 default=None,
1144 )
1147 )
1145 coreconfigitem('ui', 'editor',
1148 coreconfigitem('ui', 'editor',
1146 default=dynamicdefault,
1149 default=dynamicdefault,
1147 )
1150 )
1148 coreconfigitem('ui', 'fallbackencoding',
1151 coreconfigitem('ui', 'fallbackencoding',
1149 default=None,
1152 default=None,
1150 )
1153 )
1151 coreconfigitem('ui', 'forcecwd',
1154 coreconfigitem('ui', 'forcecwd',
1152 default=None,
1155 default=None,
1153 )
1156 )
1154 coreconfigitem('ui', 'forcemerge',
1157 coreconfigitem('ui', 'forcemerge',
1155 default=None,
1158 default=None,
1156 )
1159 )
1157 coreconfigitem('ui', 'formatdebug',
1160 coreconfigitem('ui', 'formatdebug',
1158 default=False,
1161 default=False,
1159 )
1162 )
1160 coreconfigitem('ui', 'formatjson',
1163 coreconfigitem('ui', 'formatjson',
1161 default=False,
1164 default=False,
1162 )
1165 )
1163 coreconfigitem('ui', 'formatted',
1166 coreconfigitem('ui', 'formatted',
1164 default=None,
1167 default=None,
1165 )
1168 )
1166 coreconfigitem('ui', 'graphnodetemplate',
1169 coreconfigitem('ui', 'graphnodetemplate',
1167 default=None,
1170 default=None,
1168 )
1171 )
1169 coreconfigitem('ui', 'interactive',
1172 coreconfigitem('ui', 'interactive',
1170 default=None,
1173 default=None,
1171 )
1174 )
1172 coreconfigitem('ui', 'interface',
1175 coreconfigitem('ui', 'interface',
1173 default=None,
1176 default=None,
1174 )
1177 )
1175 coreconfigitem('ui', 'interface.chunkselector',
1178 coreconfigitem('ui', 'interface.chunkselector',
1176 default=None,
1179 default=None,
1177 )
1180 )
1178 coreconfigitem('ui', 'large-file-limit',
1181 coreconfigitem('ui', 'large-file-limit',
1179 default=10000000,
1182 default=10000000,
1180 )
1183 )
1181 coreconfigitem('ui', 'logblockedtimes',
1184 coreconfigitem('ui', 'logblockedtimes',
1182 default=False,
1185 default=False,
1183 )
1186 )
1184 coreconfigitem('ui', 'logtemplate',
1187 coreconfigitem('ui', 'logtemplate',
1185 default=None,
1188 default=None,
1186 )
1189 )
1187 coreconfigitem('ui', 'merge',
1190 coreconfigitem('ui', 'merge',
1188 default=None,
1191 default=None,
1189 )
1192 )
1190 coreconfigitem('ui', 'mergemarkers',
1193 coreconfigitem('ui', 'mergemarkers',
1191 default='basic',
1194 default='basic',
1192 )
1195 )
1193 coreconfigitem('ui', 'mergemarkertemplate',
1196 coreconfigitem('ui', 'mergemarkertemplate',
1194 default=('{node|short} '
1197 default=('{node|short} '
1195 '{ifeq(tags, "tip", "", '
1198 '{ifeq(tags, "tip", "", '
1196 'ifeq(tags, "", "", "{tags} "))}'
1199 'ifeq(tags, "", "", "{tags} "))}'
1197 '{if(bookmarks, "{bookmarks} ")}'
1200 '{if(bookmarks, "{bookmarks} ")}'
1198 '{ifeq(branch, "default", "", "{branch} ")}'
1201 '{ifeq(branch, "default", "", "{branch} ")}'
1199 '- {author|user}: {desc|firstline}')
1202 '- {author|user}: {desc|firstline}')
1200 )
1203 )
1201 coreconfigitem('ui', 'message-output',
1204 coreconfigitem('ui', 'message-output',
1202 default='stdio',
1205 default='stdio',
1203 )
1206 )
1204 coreconfigitem('ui', 'nontty',
1207 coreconfigitem('ui', 'nontty',
1205 default=False,
1208 default=False,
1206 )
1209 )
1207 coreconfigitem('ui', 'origbackuppath',
1210 coreconfigitem('ui', 'origbackuppath',
1208 default=None,
1211 default=None,
1209 )
1212 )
1210 coreconfigitem('ui', 'paginate',
1213 coreconfigitem('ui', 'paginate',
1211 default=True,
1214 default=True,
1212 )
1215 )
1213 coreconfigitem('ui', 'patch',
1216 coreconfigitem('ui', 'patch',
1214 default=None,
1217 default=None,
1215 )
1218 )
1216 coreconfigitem('ui', 'pre-merge-tool-output-template',
1219 coreconfigitem('ui', 'pre-merge-tool-output-template',
1217 default=None,
1220 default=None,
1218 )
1221 )
1219 coreconfigitem('ui', 'portablefilenames',
1222 coreconfigitem('ui', 'portablefilenames',
1220 default='warn',
1223 default='warn',
1221 )
1224 )
1222 coreconfigitem('ui', 'promptecho',
1225 coreconfigitem('ui', 'promptecho',
1223 default=False,
1226 default=False,
1224 )
1227 )
1225 coreconfigitem('ui', 'quiet',
1228 coreconfigitem('ui', 'quiet',
1226 default=False,
1229 default=False,
1227 )
1230 )
1228 coreconfigitem('ui', 'quietbookmarkmove',
1231 coreconfigitem('ui', 'quietbookmarkmove',
1229 default=False,
1232 default=False,
1230 )
1233 )
1231 coreconfigitem('ui', 'relative-paths',
1234 coreconfigitem('ui', 'relative-paths',
1232 default='legacy',
1235 default='legacy',
1233 )
1236 )
1234 coreconfigitem('ui', 'remotecmd',
1237 coreconfigitem('ui', 'remotecmd',
1235 default='hg',
1238 default='hg',
1236 )
1239 )
1237 coreconfigitem('ui', 'report_untrusted',
1240 coreconfigitem('ui', 'report_untrusted',
1238 default=True,
1241 default=True,
1239 )
1242 )
1240 coreconfigitem('ui', 'rollback',
1243 coreconfigitem('ui', 'rollback',
1241 default=True,
1244 default=True,
1242 )
1245 )
1243 coreconfigitem('ui', 'signal-safe-lock',
1246 coreconfigitem('ui', 'signal-safe-lock',
1244 default=True,
1247 default=True,
1245 )
1248 )
1246 coreconfigitem('ui', 'slash',
1249 coreconfigitem('ui', 'slash',
1247 default=False,
1250 default=False,
1248 )
1251 )
1249 coreconfigitem('ui', 'ssh',
1252 coreconfigitem('ui', 'ssh',
1250 default='ssh',
1253 default='ssh',
1251 )
1254 )
1252 coreconfigitem('ui', 'ssherrorhint',
1255 coreconfigitem('ui', 'ssherrorhint',
1253 default=None,
1256 default=None,
1254 )
1257 )
1255 coreconfigitem('ui', 'statuscopies',
1258 coreconfigitem('ui', 'statuscopies',
1256 default=False,
1259 default=False,
1257 )
1260 )
1258 coreconfigitem('ui', 'strict',
1261 coreconfigitem('ui', 'strict',
1259 default=False,
1262 default=False,
1260 )
1263 )
1261 coreconfigitem('ui', 'style',
1264 coreconfigitem('ui', 'style',
1262 default='',
1265 default='',
1263 )
1266 )
1264 coreconfigitem('ui', 'supportcontact',
1267 coreconfigitem('ui', 'supportcontact',
1265 default=None,
1268 default=None,
1266 )
1269 )
1267 coreconfigitem('ui', 'textwidth',
1270 coreconfigitem('ui', 'textwidth',
1268 default=78,
1271 default=78,
1269 )
1272 )
1270 coreconfigitem('ui', 'timeout',
1273 coreconfigitem('ui', 'timeout',
1271 default='600',
1274 default='600',
1272 )
1275 )
1273 coreconfigitem('ui', 'timeout.warn',
1276 coreconfigitem('ui', 'timeout.warn',
1274 default=0,
1277 default=0,
1275 )
1278 )
1276 coreconfigitem('ui', 'traceback',
1279 coreconfigitem('ui', 'traceback',
1277 default=False,
1280 default=False,
1278 )
1281 )
1279 coreconfigitem('ui', 'tweakdefaults',
1282 coreconfigitem('ui', 'tweakdefaults',
1280 default=False,
1283 default=False,
1281 )
1284 )
1282 coreconfigitem('ui', 'username',
1285 coreconfigitem('ui', 'username',
1283 alias=[('ui', 'user')]
1286 alias=[('ui', 'user')]
1284 )
1287 )
1285 coreconfigitem('ui', 'verbose',
1288 coreconfigitem('ui', 'verbose',
1286 default=False,
1289 default=False,
1287 )
1290 )
1288 coreconfigitem('verify', 'skipflags',
1291 coreconfigitem('verify', 'skipflags',
1289 default=None,
1292 default=None,
1290 )
1293 )
1291 coreconfigitem('web', 'allowbz2',
1294 coreconfigitem('web', 'allowbz2',
1292 default=False,
1295 default=False,
1293 )
1296 )
1294 coreconfigitem('web', 'allowgz',
1297 coreconfigitem('web', 'allowgz',
1295 default=False,
1298 default=False,
1296 )
1299 )
1297 coreconfigitem('web', 'allow-pull',
1300 coreconfigitem('web', 'allow-pull',
1298 alias=[('web', 'allowpull')],
1301 alias=[('web', 'allowpull')],
1299 default=True,
1302 default=True,
1300 )
1303 )
1301 coreconfigitem('web', 'allow-push',
1304 coreconfigitem('web', 'allow-push',
1302 alias=[('web', 'allow_push')],
1305 alias=[('web', 'allow_push')],
1303 default=list,
1306 default=list,
1304 )
1307 )
1305 coreconfigitem('web', 'allowzip',
1308 coreconfigitem('web', 'allowzip',
1306 default=False,
1309 default=False,
1307 )
1310 )
1308 coreconfigitem('web', 'archivesubrepos',
1311 coreconfigitem('web', 'archivesubrepos',
1309 default=False,
1312 default=False,
1310 )
1313 )
1311 coreconfigitem('web', 'cache',
1314 coreconfigitem('web', 'cache',
1312 default=True,
1315 default=True,
1313 )
1316 )
1314 coreconfigitem('web', 'comparisoncontext',
1317 coreconfigitem('web', 'comparisoncontext',
1315 default=5,
1318 default=5,
1316 )
1319 )
1317 coreconfigitem('web', 'contact',
1320 coreconfigitem('web', 'contact',
1318 default=None,
1321 default=None,
1319 )
1322 )
1320 coreconfigitem('web', 'deny_push',
1323 coreconfigitem('web', 'deny_push',
1321 default=list,
1324 default=list,
1322 )
1325 )
1323 coreconfigitem('web', 'guessmime',
1326 coreconfigitem('web', 'guessmime',
1324 default=False,
1327 default=False,
1325 )
1328 )
1326 coreconfigitem('web', 'hidden',
1329 coreconfigitem('web', 'hidden',
1327 default=False,
1330 default=False,
1328 )
1331 )
1329 coreconfigitem('web', 'labels',
1332 coreconfigitem('web', 'labels',
1330 default=list,
1333 default=list,
1331 )
1334 )
1332 coreconfigitem('web', 'logoimg',
1335 coreconfigitem('web', 'logoimg',
1333 default='hglogo.png',
1336 default='hglogo.png',
1334 )
1337 )
1335 coreconfigitem('web', 'logourl',
1338 coreconfigitem('web', 'logourl',
1336 default='https://mercurial-scm.org/',
1339 default='https://mercurial-scm.org/',
1337 )
1340 )
1338 coreconfigitem('web', 'accesslog',
1341 coreconfigitem('web', 'accesslog',
1339 default='-',
1342 default='-',
1340 )
1343 )
1341 coreconfigitem('web', 'address',
1344 coreconfigitem('web', 'address',
1342 default='',
1345 default='',
1343 )
1346 )
1344 coreconfigitem('web', 'allow-archive',
1347 coreconfigitem('web', 'allow-archive',
1345 alias=[('web', 'allow_archive')],
1348 alias=[('web', 'allow_archive')],
1346 default=list,
1349 default=list,
1347 )
1350 )
1348 coreconfigitem('web', 'allow_read',
1351 coreconfigitem('web', 'allow_read',
1349 default=list,
1352 default=list,
1350 )
1353 )
1351 coreconfigitem('web', 'baseurl',
1354 coreconfigitem('web', 'baseurl',
1352 default=None,
1355 default=None,
1353 )
1356 )
1354 coreconfigitem('web', 'cacerts',
1357 coreconfigitem('web', 'cacerts',
1355 default=None,
1358 default=None,
1356 )
1359 )
1357 coreconfigitem('web', 'certificate',
1360 coreconfigitem('web', 'certificate',
1358 default=None,
1361 default=None,
1359 )
1362 )
1360 coreconfigitem('web', 'collapse',
1363 coreconfigitem('web', 'collapse',
1361 default=False,
1364 default=False,
1362 )
1365 )
1363 coreconfigitem('web', 'csp',
1366 coreconfigitem('web', 'csp',
1364 default=None,
1367 default=None,
1365 )
1368 )
1366 coreconfigitem('web', 'deny_read',
1369 coreconfigitem('web', 'deny_read',
1367 default=list,
1370 default=list,
1368 )
1371 )
1369 coreconfigitem('web', 'descend',
1372 coreconfigitem('web', 'descend',
1370 default=True,
1373 default=True,
1371 )
1374 )
1372 coreconfigitem('web', 'description',
1375 coreconfigitem('web', 'description',
1373 default="",
1376 default="",
1374 )
1377 )
1375 coreconfigitem('web', 'encoding',
1378 coreconfigitem('web', 'encoding',
1376 default=lambda: encoding.encoding,
1379 default=lambda: encoding.encoding,
1377 )
1380 )
1378 coreconfigitem('web', 'errorlog',
1381 coreconfigitem('web', 'errorlog',
1379 default='-',
1382 default='-',
1380 )
1383 )
1381 coreconfigitem('web', 'ipv6',
1384 coreconfigitem('web', 'ipv6',
1382 default=False,
1385 default=False,
1383 )
1386 )
1384 coreconfigitem('web', 'maxchanges',
1387 coreconfigitem('web', 'maxchanges',
1385 default=10,
1388 default=10,
1386 )
1389 )
1387 coreconfigitem('web', 'maxfiles',
1390 coreconfigitem('web', 'maxfiles',
1388 default=10,
1391 default=10,
1389 )
1392 )
1390 coreconfigitem('web', 'maxshortchanges',
1393 coreconfigitem('web', 'maxshortchanges',
1391 default=60,
1394 default=60,
1392 )
1395 )
1393 coreconfigitem('web', 'motd',
1396 coreconfigitem('web', 'motd',
1394 default='',
1397 default='',
1395 )
1398 )
1396 coreconfigitem('web', 'name',
1399 coreconfigitem('web', 'name',
1397 default=dynamicdefault,
1400 default=dynamicdefault,
1398 )
1401 )
1399 coreconfigitem('web', 'port',
1402 coreconfigitem('web', 'port',
1400 default=8000,
1403 default=8000,
1401 )
1404 )
1402 coreconfigitem('web', 'prefix',
1405 coreconfigitem('web', 'prefix',
1403 default='',
1406 default='',
1404 )
1407 )
1405 coreconfigitem('web', 'push_ssl',
1408 coreconfigitem('web', 'push_ssl',
1406 default=True,
1409 default=True,
1407 )
1410 )
1408 coreconfigitem('web', 'refreshinterval',
1411 coreconfigitem('web', 'refreshinterval',
1409 default=20,
1412 default=20,
1410 )
1413 )
1411 coreconfigitem('web', 'server-header',
1414 coreconfigitem('web', 'server-header',
1412 default=None,
1415 default=None,
1413 )
1416 )
1414 coreconfigitem('web', 'static',
1417 coreconfigitem('web', 'static',
1415 default=None,
1418 default=None,
1416 )
1419 )
1417 coreconfigitem('web', 'staticurl',
1420 coreconfigitem('web', 'staticurl',
1418 default=None,
1421 default=None,
1419 )
1422 )
1420 coreconfigitem('web', 'stripes',
1423 coreconfigitem('web', 'stripes',
1421 default=1,
1424 default=1,
1422 )
1425 )
1423 coreconfigitem('web', 'style',
1426 coreconfigitem('web', 'style',
1424 default='paper',
1427 default='paper',
1425 )
1428 )
1426 coreconfigitem('web', 'templates',
1429 coreconfigitem('web', 'templates',
1427 default=None,
1430 default=None,
1428 )
1431 )
1429 coreconfigitem('web', 'view',
1432 coreconfigitem('web', 'view',
1430 default='served',
1433 default='served',
1431 )
1434 )
1432 coreconfigitem('worker', 'backgroundclose',
1435 coreconfigitem('worker', 'backgroundclose',
1433 default=dynamicdefault,
1436 default=dynamicdefault,
1434 )
1437 )
1435 # Windows defaults to a limit of 512 open files. A buffer of 128
1438 # Windows defaults to a limit of 512 open files. A buffer of 128
1436 # should give us enough headway.
1439 # should give us enough headway.
1437 coreconfigitem('worker', 'backgroundclosemaxqueue',
1440 coreconfigitem('worker', 'backgroundclosemaxqueue',
1438 default=384,
1441 default=384,
1439 )
1442 )
1440 coreconfigitem('worker', 'backgroundcloseminfilecount',
1443 coreconfigitem('worker', 'backgroundcloseminfilecount',
1441 default=2048,
1444 default=2048,
1442 )
1445 )
1443 coreconfigitem('worker', 'backgroundclosethreadcount',
1446 coreconfigitem('worker', 'backgroundclosethreadcount',
1444 default=4,
1447 default=4,
1445 )
1448 )
1446 coreconfigitem('worker', 'enabled',
1449 coreconfigitem('worker', 'enabled',
1447 default=True,
1450 default=True,
1448 )
1451 )
1449 coreconfigitem('worker', 'numcpus',
1452 coreconfigitem('worker', 'numcpus',
1450 default=None,
1453 default=None,
1451 )
1454 )
1452
1455
1453 # Rebase related configuration moved to core because other extension are doing
1456 # Rebase related configuration moved to core because other extension are doing
1454 # strange things. For example, shelve import the extensions to reuse some bit
1457 # strange things. For example, shelve import the extensions to reuse some bit
1455 # without formally loading it.
1458 # without formally loading it.
1456 coreconfigitem('commands', 'rebase.requiredest',
1459 coreconfigitem('commands', 'rebase.requiredest',
1457 default=False,
1460 default=False,
1458 )
1461 )
1459 coreconfigitem('experimental', 'rebaseskipobsolete',
1462 coreconfigitem('experimental', 'rebaseskipobsolete',
1460 default=True,
1463 default=True,
1461 )
1464 )
1462 coreconfigitem('rebase', 'singletransaction',
1465 coreconfigitem('rebase', 'singletransaction',
1463 default=False,
1466 default=False,
1464 )
1467 )
1465 coreconfigitem('rebase', 'experimental.inmemory',
1468 coreconfigitem('rebase', 'experimental.inmemory',
1466 default=False,
1469 default=False,
1467 )
1470 )
@@ -1,2823 +1,2828 b''
1 The Mercurial system uses a set of configuration files to control
1 The Mercurial system uses a set of configuration files to control
2 aspects of its behavior.
2 aspects of its behavior.
3
3
4 Troubleshooting
4 Troubleshooting
5 ===============
5 ===============
6
6
7 If you're having problems with your configuration,
7 If you're having problems with your configuration,
8 :hg:`config --debug` can help you understand what is introducing
8 :hg:`config --debug` can help you understand what is introducing
9 a setting into your environment.
9 a setting into your environment.
10
10
11 See :hg:`help config.syntax` and :hg:`help config.files`
11 See :hg:`help config.syntax` and :hg:`help config.files`
12 for information about how and where to override things.
12 for information about how and where to override things.
13
13
14 Structure
14 Structure
15 =========
15 =========
16
16
17 The configuration files use a simple ini-file format. A configuration
17 The configuration files use a simple ini-file format. A configuration
18 file consists of sections, led by a ``[section]`` header and followed
18 file consists of sections, led by a ``[section]`` header and followed
19 by ``name = value`` entries::
19 by ``name = value`` entries::
20
20
21 [ui]
21 [ui]
22 username = Firstname Lastname <firstname.lastname@example.net>
22 username = Firstname Lastname <firstname.lastname@example.net>
23 verbose = True
23 verbose = True
24
24
25 The above entries will be referred to as ``ui.username`` and
25 The above entries will be referred to as ``ui.username`` and
26 ``ui.verbose``, respectively. See :hg:`help config.syntax`.
26 ``ui.verbose``, respectively. See :hg:`help config.syntax`.
27
27
28 Files
28 Files
29 =====
29 =====
30
30
31 Mercurial reads configuration data from several files, if they exist.
31 Mercurial reads configuration data from several files, if they exist.
32 These files do not exist by default and you will have to create the
32 These files do not exist by default and you will have to create the
33 appropriate configuration files yourself:
33 appropriate configuration files yourself:
34
34
35 Local configuration is put into the per-repository ``<repo>/.hg/hgrc`` file.
35 Local configuration is put into the per-repository ``<repo>/.hg/hgrc`` file.
36
36
37 Global configuration like the username setting is typically put into:
37 Global configuration like the username setting is typically put into:
38
38
39 .. container:: windows
39 .. container:: windows
40
40
41 - ``%USERPROFILE%\mercurial.ini`` (on Windows)
41 - ``%USERPROFILE%\mercurial.ini`` (on Windows)
42
42
43 .. container:: unix.plan9
43 .. container:: unix.plan9
44
44
45 - ``$HOME/.hgrc`` (on Unix, Plan9)
45 - ``$HOME/.hgrc`` (on Unix, Plan9)
46
46
47 The names of these files depend on the system on which Mercurial is
47 The names of these files depend on the system on which Mercurial is
48 installed. ``*.rc`` files from a single directory are read in
48 installed. ``*.rc`` files from a single directory are read in
49 alphabetical order, later ones overriding earlier ones. Where multiple
49 alphabetical order, later ones overriding earlier ones. Where multiple
50 paths are given below, settings from earlier paths override later
50 paths are given below, settings from earlier paths override later
51 ones.
51 ones.
52
52
53 .. container:: verbose.unix
53 .. container:: verbose.unix
54
54
55 On Unix, the following files are consulted:
55 On Unix, the following files are consulted:
56
56
57 - ``<repo>/.hg/hgrc`` (per-repository)
57 - ``<repo>/.hg/hgrc`` (per-repository)
58 - ``$HOME/.hgrc`` (per-user)
58 - ``$HOME/.hgrc`` (per-user)
59 - ``${XDG_CONFIG_HOME:-$HOME/.config}/hg/hgrc`` (per-user)
59 - ``${XDG_CONFIG_HOME:-$HOME/.config}/hg/hgrc`` (per-user)
60 - ``<install-root>/etc/mercurial/hgrc`` (per-installation)
60 - ``<install-root>/etc/mercurial/hgrc`` (per-installation)
61 - ``<install-root>/etc/mercurial/hgrc.d/*.rc`` (per-installation)
61 - ``<install-root>/etc/mercurial/hgrc.d/*.rc`` (per-installation)
62 - ``/etc/mercurial/hgrc`` (per-system)
62 - ``/etc/mercurial/hgrc`` (per-system)
63 - ``/etc/mercurial/hgrc.d/*.rc`` (per-system)
63 - ``/etc/mercurial/hgrc.d/*.rc`` (per-system)
64 - ``<internal>/default.d/*.rc`` (defaults)
64 - ``<internal>/default.d/*.rc`` (defaults)
65
65
66 .. container:: verbose.windows
66 .. container:: verbose.windows
67
67
68 On Windows, the following files are consulted:
68 On Windows, the following files are consulted:
69
69
70 - ``<repo>/.hg/hgrc`` (per-repository)
70 - ``<repo>/.hg/hgrc`` (per-repository)
71 - ``%USERPROFILE%\.hgrc`` (per-user)
71 - ``%USERPROFILE%\.hgrc`` (per-user)
72 - ``%USERPROFILE%\Mercurial.ini`` (per-user)
72 - ``%USERPROFILE%\Mercurial.ini`` (per-user)
73 - ``%HOME%\.hgrc`` (per-user)
73 - ``%HOME%\.hgrc`` (per-user)
74 - ``%HOME%\Mercurial.ini`` (per-user)
74 - ``%HOME%\Mercurial.ini`` (per-user)
75 - ``HKEY_LOCAL_MACHINE\SOFTWARE\Mercurial`` (per-installation)
75 - ``HKEY_LOCAL_MACHINE\SOFTWARE\Mercurial`` (per-installation)
76 - ``<install-dir>\hgrc.d\*.rc`` (per-installation)
76 - ``<install-dir>\hgrc.d\*.rc`` (per-installation)
77 - ``<install-dir>\Mercurial.ini`` (per-installation)
77 - ``<install-dir>\Mercurial.ini`` (per-installation)
78 - ``<internal>/default.d/*.rc`` (defaults)
78 - ``<internal>/default.d/*.rc`` (defaults)
79
79
80 .. note::
80 .. note::
81
81
82 The registry key ``HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Mercurial``
82 The registry key ``HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Mercurial``
83 is used when running 32-bit Python on 64-bit Windows.
83 is used when running 32-bit Python on 64-bit Windows.
84
84
85 .. container:: windows
85 .. container:: windows
86
86
87 On Windows 9x, ``%HOME%`` is replaced by ``%APPDATA%``.
87 On Windows 9x, ``%HOME%`` is replaced by ``%APPDATA%``.
88
88
89 .. container:: verbose.plan9
89 .. container:: verbose.plan9
90
90
91 On Plan9, the following files are consulted:
91 On Plan9, the following files are consulted:
92
92
93 - ``<repo>/.hg/hgrc`` (per-repository)
93 - ``<repo>/.hg/hgrc`` (per-repository)
94 - ``$home/lib/hgrc`` (per-user)
94 - ``$home/lib/hgrc`` (per-user)
95 - ``<install-root>/lib/mercurial/hgrc`` (per-installation)
95 - ``<install-root>/lib/mercurial/hgrc`` (per-installation)
96 - ``<install-root>/lib/mercurial/hgrc.d/*.rc`` (per-installation)
96 - ``<install-root>/lib/mercurial/hgrc.d/*.rc`` (per-installation)
97 - ``/lib/mercurial/hgrc`` (per-system)
97 - ``/lib/mercurial/hgrc`` (per-system)
98 - ``/lib/mercurial/hgrc.d/*.rc`` (per-system)
98 - ``/lib/mercurial/hgrc.d/*.rc`` (per-system)
99 - ``<internal>/default.d/*.rc`` (defaults)
99 - ``<internal>/default.d/*.rc`` (defaults)
100
100
101 Per-repository configuration options only apply in a
101 Per-repository configuration options only apply in a
102 particular repository. This file is not version-controlled, and
102 particular repository. This file is not version-controlled, and
103 will not get transferred during a "clone" operation. Options in
103 will not get transferred during a "clone" operation. Options in
104 this file override options in all other configuration files.
104 this file override options in all other configuration files.
105
105
106 .. container:: unix.plan9
106 .. container:: unix.plan9
107
107
108 On Plan 9 and Unix, most of this file will be ignored if it doesn't
108 On Plan 9 and Unix, most of this file will be ignored if it doesn't
109 belong to a trusted user or to a trusted group. See
109 belong to a trusted user or to a trusted group. See
110 :hg:`help config.trusted` for more details.
110 :hg:`help config.trusted` for more details.
111
111
112 Per-user configuration file(s) are for the user running Mercurial. Options
112 Per-user configuration file(s) are for the user running Mercurial. Options
113 in these files apply to all Mercurial commands executed by this user in any
113 in these files apply to all Mercurial commands executed by this user in any
114 directory. Options in these files override per-system and per-installation
114 directory. Options in these files override per-system and per-installation
115 options.
115 options.
116
116
117 Per-installation configuration files are searched for in the
117 Per-installation configuration files are searched for in the
118 directory where Mercurial is installed. ``<install-root>`` is the
118 directory where Mercurial is installed. ``<install-root>`` is the
119 parent directory of the **hg** executable (or symlink) being run.
119 parent directory of the **hg** executable (or symlink) being run.
120
120
121 .. container:: unix.plan9
121 .. container:: unix.plan9
122
122
123 For example, if installed in ``/shared/tools/bin/hg``, Mercurial
123 For example, if installed in ``/shared/tools/bin/hg``, Mercurial
124 will look in ``/shared/tools/etc/mercurial/hgrc``. Options in these
124 will look in ``/shared/tools/etc/mercurial/hgrc``. Options in these
125 files apply to all Mercurial commands executed by any user in any
125 files apply to all Mercurial commands executed by any user in any
126 directory.
126 directory.
127
127
128 Per-installation configuration files are for the system on
128 Per-installation configuration files are for the system on
129 which Mercurial is running. Options in these files apply to all
129 which Mercurial is running. Options in these files apply to all
130 Mercurial commands executed by any user in any directory. Registry
130 Mercurial commands executed by any user in any directory. Registry
131 keys contain PATH-like strings, every part of which must reference
131 keys contain PATH-like strings, every part of which must reference
132 a ``Mercurial.ini`` file or be a directory where ``*.rc`` files will
132 a ``Mercurial.ini`` file or be a directory where ``*.rc`` files will
133 be read. Mercurial checks each of these locations in the specified
133 be read. Mercurial checks each of these locations in the specified
134 order until one or more configuration files are detected.
134 order until one or more configuration files are detected.
135
135
136 Per-system configuration files are for the system on which Mercurial
136 Per-system configuration files are for the system on which Mercurial
137 is running. Options in these files apply to all Mercurial commands
137 is running. Options in these files apply to all Mercurial commands
138 executed by any user in any directory. Options in these files
138 executed by any user in any directory. Options in these files
139 override per-installation options.
139 override per-installation options.
140
140
141 Mercurial comes with some default configuration. The default configuration
141 Mercurial comes with some default configuration. The default configuration
142 files are installed with Mercurial and will be overwritten on upgrades. Default
142 files are installed with Mercurial and will be overwritten on upgrades. Default
143 configuration files should never be edited by users or administrators but can
143 configuration files should never be edited by users or administrators but can
144 be overridden in other configuration files. So far the directory only contains
144 be overridden in other configuration files. So far the directory only contains
145 merge tool configuration but packagers can also put other default configuration
145 merge tool configuration but packagers can also put other default configuration
146 there.
146 there.
147
147
148 Syntax
148 Syntax
149 ======
149 ======
150
150
151 A configuration file consists of sections, led by a ``[section]`` header
151 A configuration file consists of sections, led by a ``[section]`` header
152 and followed by ``name = value`` entries (sometimes called
152 and followed by ``name = value`` entries (sometimes called
153 ``configuration keys``)::
153 ``configuration keys``)::
154
154
155 [spam]
155 [spam]
156 eggs=ham
156 eggs=ham
157 green=
157 green=
158 eggs
158 eggs
159
159
160 Each line contains one entry. If the lines that follow are indented,
160 Each line contains one entry. If the lines that follow are indented,
161 they are treated as continuations of that entry. Leading whitespace is
161 they are treated as continuations of that entry. Leading whitespace is
162 removed from values. Empty lines are skipped. Lines beginning with
162 removed from values. Empty lines are skipped. Lines beginning with
163 ``#`` or ``;`` are ignored and may be used to provide comments.
163 ``#`` or ``;`` are ignored and may be used to provide comments.
164
164
165 Configuration keys can be set multiple times, in which case Mercurial
165 Configuration keys can be set multiple times, in which case Mercurial
166 will use the value that was configured last. As an example::
166 will use the value that was configured last. As an example::
167
167
168 [spam]
168 [spam]
169 eggs=large
169 eggs=large
170 ham=serrano
170 ham=serrano
171 eggs=small
171 eggs=small
172
172
173 This would set the configuration key named ``eggs`` to ``small``.
173 This would set the configuration key named ``eggs`` to ``small``.
174
174
175 It is also possible to define a section multiple times. A section can
175 It is also possible to define a section multiple times. A section can
176 be redefined on the same and/or on different configuration files. For
176 be redefined on the same and/or on different configuration files. For
177 example::
177 example::
178
178
179 [foo]
179 [foo]
180 eggs=large
180 eggs=large
181 ham=serrano
181 ham=serrano
182 eggs=small
182 eggs=small
183
183
184 [bar]
184 [bar]
185 eggs=ham
185 eggs=ham
186 green=
186 green=
187 eggs
187 eggs
188
188
189 [foo]
189 [foo]
190 ham=prosciutto
190 ham=prosciutto
191 eggs=medium
191 eggs=medium
192 bread=toasted
192 bread=toasted
193
193
194 This would set the ``eggs``, ``ham``, and ``bread`` configuration keys
194 This would set the ``eggs``, ``ham``, and ``bread`` configuration keys
195 of the ``foo`` section to ``medium``, ``prosciutto``, and ``toasted``,
195 of the ``foo`` section to ``medium``, ``prosciutto``, and ``toasted``,
196 respectively. As you can see there only thing that matters is the last
196 respectively. As you can see there only thing that matters is the last
197 value that was set for each of the configuration keys.
197 value that was set for each of the configuration keys.
198
198
199 If a configuration key is set multiple times in different
199 If a configuration key is set multiple times in different
200 configuration files the final value will depend on the order in which
200 configuration files the final value will depend on the order in which
201 the different configuration files are read, with settings from earlier
201 the different configuration files are read, with settings from earlier
202 paths overriding later ones as described on the ``Files`` section
202 paths overriding later ones as described on the ``Files`` section
203 above.
203 above.
204
204
205 A line of the form ``%include file`` will include ``file`` into the
205 A line of the form ``%include file`` will include ``file`` into the
206 current configuration file. The inclusion is recursive, which means
206 current configuration file. The inclusion is recursive, which means
207 that included files can include other files. Filenames are relative to
207 that included files can include other files. Filenames are relative to
208 the configuration file in which the ``%include`` directive is found.
208 the configuration file in which the ``%include`` directive is found.
209 Environment variables and ``~user`` constructs are expanded in
209 Environment variables and ``~user`` constructs are expanded in
210 ``file``. This lets you do something like::
210 ``file``. This lets you do something like::
211
211
212 %include ~/.hgrc.d/$HOST.rc
212 %include ~/.hgrc.d/$HOST.rc
213
213
214 to include a different configuration file on each computer you use.
214 to include a different configuration file on each computer you use.
215
215
216 A line with ``%unset name`` will remove ``name`` from the current
216 A line with ``%unset name`` will remove ``name`` from the current
217 section, if it has been set previously.
217 section, if it has been set previously.
218
218
219 The values are either free-form text strings, lists of text strings,
219 The values are either free-form text strings, lists of text strings,
220 or Boolean values. Boolean values can be set to true using any of "1",
220 or Boolean values. Boolean values can be set to true using any of "1",
221 "yes", "true", or "on" and to false using "0", "no", "false", or "off"
221 "yes", "true", or "on" and to false using "0", "no", "false", or "off"
222 (all case insensitive).
222 (all case insensitive).
223
223
224 List values are separated by whitespace or comma, except when values are
224 List values are separated by whitespace or comma, except when values are
225 placed in double quotation marks::
225 placed in double quotation marks::
226
226
227 allow_read = "John Doe, PhD", brian, betty
227 allow_read = "John Doe, PhD", brian, betty
228
228
229 Quotation marks can be escaped by prefixing them with a backslash. Only
229 Quotation marks can be escaped by prefixing them with a backslash. Only
230 quotation marks at the beginning of a word is counted as a quotation
230 quotation marks at the beginning of a word is counted as a quotation
231 (e.g., ``foo"bar baz`` is the list of ``foo"bar`` and ``baz``).
231 (e.g., ``foo"bar baz`` is the list of ``foo"bar`` and ``baz``).
232
232
233 Sections
233 Sections
234 ========
234 ========
235
235
236 This section describes the different sections that may appear in a
236 This section describes the different sections that may appear in a
237 Mercurial configuration file, the purpose of each section, its possible
237 Mercurial configuration file, the purpose of each section, its possible
238 keys, and their possible values.
238 keys, and their possible values.
239
239
240 ``alias``
240 ``alias``
241 ---------
241 ---------
242
242
243 Defines command aliases.
243 Defines command aliases.
244
244
245 Aliases allow you to define your own commands in terms of other
245 Aliases allow you to define your own commands in terms of other
246 commands (or aliases), optionally including arguments. Positional
246 commands (or aliases), optionally including arguments. Positional
247 arguments in the form of ``$1``, ``$2``, etc. in the alias definition
247 arguments in the form of ``$1``, ``$2``, etc. in the alias definition
248 are expanded by Mercurial before execution. Positional arguments not
248 are expanded by Mercurial before execution. Positional arguments not
249 already used by ``$N`` in the definition are put at the end of the
249 already used by ``$N`` in the definition are put at the end of the
250 command to be executed.
250 command to be executed.
251
251
252 Alias definitions consist of lines of the form::
252 Alias definitions consist of lines of the form::
253
253
254 <alias> = <command> [<argument>]...
254 <alias> = <command> [<argument>]...
255
255
256 For example, this definition::
256 For example, this definition::
257
257
258 latest = log --limit 5
258 latest = log --limit 5
259
259
260 creates a new command ``latest`` that shows only the five most recent
260 creates a new command ``latest`` that shows only the five most recent
261 changesets. You can define subsequent aliases using earlier ones::
261 changesets. You can define subsequent aliases using earlier ones::
262
262
263 stable5 = latest -b stable
263 stable5 = latest -b stable
264
264
265 .. note::
265 .. note::
266
266
267 It is possible to create aliases with the same names as
267 It is possible to create aliases with the same names as
268 existing commands, which will then override the original
268 existing commands, which will then override the original
269 definitions. This is almost always a bad idea!
269 definitions. This is almost always a bad idea!
270
270
271 An alias can start with an exclamation point (``!``) to make it a
271 An alias can start with an exclamation point (``!``) to make it a
272 shell alias. A shell alias is executed with the shell and will let you
272 shell alias. A shell alias is executed with the shell and will let you
273 run arbitrary commands. As an example, ::
273 run arbitrary commands. As an example, ::
274
274
275 echo = !echo $@
275 echo = !echo $@
276
276
277 will let you do ``hg echo foo`` to have ``foo`` printed in your
277 will let you do ``hg echo foo`` to have ``foo`` printed in your
278 terminal. A better example might be::
278 terminal. A better example might be::
279
279
280 purge = !$HG status --no-status --unknown -0 re: | xargs -0 rm -f
280 purge = !$HG status --no-status --unknown -0 re: | xargs -0 rm -f
281
281
282 which will make ``hg purge`` delete all unknown files in the
282 which will make ``hg purge`` delete all unknown files in the
283 repository in the same manner as the purge extension.
283 repository in the same manner as the purge extension.
284
284
285 Positional arguments like ``$1``, ``$2``, etc. in the alias definition
285 Positional arguments like ``$1``, ``$2``, etc. in the alias definition
286 expand to the command arguments. Unmatched arguments are
286 expand to the command arguments. Unmatched arguments are
287 removed. ``$0`` expands to the alias name and ``$@`` expands to all
287 removed. ``$0`` expands to the alias name and ``$@`` expands to all
288 arguments separated by a space. ``"$@"`` (with quotes) expands to all
288 arguments separated by a space. ``"$@"`` (with quotes) expands to all
289 arguments quoted individually and separated by a space. These expansions
289 arguments quoted individually and separated by a space. These expansions
290 happen before the command is passed to the shell.
290 happen before the command is passed to the shell.
291
291
292 Shell aliases are executed in an environment where ``$HG`` expands to
292 Shell aliases are executed in an environment where ``$HG`` expands to
293 the path of the Mercurial that was used to execute the alias. This is
293 the path of the Mercurial that was used to execute the alias. This is
294 useful when you want to call further Mercurial commands in a shell
294 useful when you want to call further Mercurial commands in a shell
295 alias, as was done above for the purge alias. In addition,
295 alias, as was done above for the purge alias. In addition,
296 ``$HG_ARGS`` expands to the arguments given to Mercurial. In the ``hg
296 ``$HG_ARGS`` expands to the arguments given to Mercurial. In the ``hg
297 echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``.
297 echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``.
298
298
299 .. note::
299 .. note::
300
300
301 Some global configuration options such as ``-R`` are
301 Some global configuration options such as ``-R`` are
302 processed before shell aliases and will thus not be passed to
302 processed before shell aliases and will thus not be passed to
303 aliases.
303 aliases.
304
304
305
305
306 ``annotate``
306 ``annotate``
307 ------------
307 ------------
308
308
309 Settings used when displaying file annotations. All values are
309 Settings used when displaying file annotations. All values are
310 Booleans and default to False. See :hg:`help config.diff` for
310 Booleans and default to False. See :hg:`help config.diff` for
311 related options for the diff command.
311 related options for the diff command.
312
312
313 ``ignorews``
313 ``ignorews``
314 Ignore white space when comparing lines.
314 Ignore white space when comparing lines.
315
315
316 ``ignorewseol``
316 ``ignorewseol``
317 Ignore white space at the end of a line when comparing lines.
317 Ignore white space at the end of a line when comparing lines.
318
318
319 ``ignorewsamount``
319 ``ignorewsamount``
320 Ignore changes in the amount of white space.
320 Ignore changes in the amount of white space.
321
321
322 ``ignoreblanklines``
322 ``ignoreblanklines``
323 Ignore changes whose lines are all blank.
323 Ignore changes whose lines are all blank.
324
324
325
325
326 ``auth``
326 ``auth``
327 --------
327 --------
328
328
329 Authentication credentials and other authentication-like configuration
329 Authentication credentials and other authentication-like configuration
330 for HTTP connections. This section allows you to store usernames and
330 for HTTP connections. This section allows you to store usernames and
331 passwords for use when logging *into* HTTP servers. See
331 passwords for use when logging *into* HTTP servers. See
332 :hg:`help config.web` if you want to configure *who* can login to
332 :hg:`help config.web` if you want to configure *who* can login to
333 your HTTP server.
333 your HTTP server.
334
334
335 The following options apply to all hosts.
335 The following options apply to all hosts.
336
336
337 ``cookiefile``
337 ``cookiefile``
338 Path to a file containing HTTP cookie lines. Cookies matching a
338 Path to a file containing HTTP cookie lines. Cookies matching a
339 host will be sent automatically.
339 host will be sent automatically.
340
340
341 The file format uses the Mozilla cookies.txt format, which defines cookies
341 The file format uses the Mozilla cookies.txt format, which defines cookies
342 on their own lines. Each line contains 7 fields delimited by the tab
342 on their own lines. Each line contains 7 fields delimited by the tab
343 character (domain, is_domain_cookie, path, is_secure, expires, name,
343 character (domain, is_domain_cookie, path, is_secure, expires, name,
344 value). For more info, do an Internet search for "Netscape cookies.txt
344 value). For more info, do an Internet search for "Netscape cookies.txt
345 format."
345 format."
346
346
347 Note: the cookies parser does not handle port numbers on domains. You
347 Note: the cookies parser does not handle port numbers on domains. You
348 will need to remove ports from the domain for the cookie to be recognized.
348 will need to remove ports from the domain for the cookie to be recognized.
349 This could result in a cookie being disclosed to an unwanted server.
349 This could result in a cookie being disclosed to an unwanted server.
350
350
351 The cookies file is read-only.
351 The cookies file is read-only.
352
352
353 Other options in this section are grouped by name and have the following
353 Other options in this section are grouped by name and have the following
354 format::
354 format::
355
355
356 <name>.<argument> = <value>
356 <name>.<argument> = <value>
357
357
358 where ``<name>`` is used to group arguments into authentication
358 where ``<name>`` is used to group arguments into authentication
359 entries. Example::
359 entries. Example::
360
360
361 foo.prefix = hg.intevation.de/mercurial
361 foo.prefix = hg.intevation.de/mercurial
362 foo.username = foo
362 foo.username = foo
363 foo.password = bar
363 foo.password = bar
364 foo.schemes = http https
364 foo.schemes = http https
365
365
366 bar.prefix = secure.example.org
366 bar.prefix = secure.example.org
367 bar.key = path/to/file.key
367 bar.key = path/to/file.key
368 bar.cert = path/to/file.cert
368 bar.cert = path/to/file.cert
369 bar.schemes = https
369 bar.schemes = https
370
370
371 Supported arguments:
371 Supported arguments:
372
372
373 ``prefix``
373 ``prefix``
374 Either ``*`` or a URI prefix with or without the scheme part.
374 Either ``*`` or a URI prefix with or without the scheme part.
375 The authentication entry with the longest matching prefix is used
375 The authentication entry with the longest matching prefix is used
376 (where ``*`` matches everything and counts as a match of length
376 (where ``*`` matches everything and counts as a match of length
377 1). If the prefix doesn't include a scheme, the match is performed
377 1). If the prefix doesn't include a scheme, the match is performed
378 against the URI with its scheme stripped as well, and the schemes
378 against the URI with its scheme stripped as well, and the schemes
379 argument, q.v., is then subsequently consulted.
379 argument, q.v., is then subsequently consulted.
380
380
381 ``username``
381 ``username``
382 Optional. Username to authenticate with. If not given, and the
382 Optional. Username to authenticate with. If not given, and the
383 remote site requires basic or digest authentication, the user will
383 remote site requires basic or digest authentication, the user will
384 be prompted for it. Environment variables are expanded in the
384 be prompted for it. Environment variables are expanded in the
385 username letting you do ``foo.username = $USER``. If the URI
385 username letting you do ``foo.username = $USER``. If the URI
386 includes a username, only ``[auth]`` entries with a matching
386 includes a username, only ``[auth]`` entries with a matching
387 username or without a username will be considered.
387 username or without a username will be considered.
388
388
389 ``password``
389 ``password``
390 Optional. Password to authenticate with. If not given, and the
390 Optional. Password to authenticate with. If not given, and the
391 remote site requires basic or digest authentication, the user
391 remote site requires basic or digest authentication, the user
392 will be prompted for it.
392 will be prompted for it.
393
393
394 ``key``
394 ``key``
395 Optional. PEM encoded client certificate key file. Environment
395 Optional. PEM encoded client certificate key file. Environment
396 variables are expanded in the filename.
396 variables are expanded in the filename.
397
397
398 ``cert``
398 ``cert``
399 Optional. PEM encoded client certificate chain file. Environment
399 Optional. PEM encoded client certificate chain file. Environment
400 variables are expanded in the filename.
400 variables are expanded in the filename.
401
401
402 ``schemes``
402 ``schemes``
403 Optional. Space separated list of URI schemes to use this
403 Optional. Space separated list of URI schemes to use this
404 authentication entry with. Only used if the prefix doesn't include
404 authentication entry with. Only used if the prefix doesn't include
405 a scheme. Supported schemes are http and https. They will match
405 a scheme. Supported schemes are http and https. They will match
406 static-http and static-https respectively, as well.
406 static-http and static-https respectively, as well.
407 (default: https)
407 (default: https)
408
408
409 If no suitable authentication entry is found, the user is prompted
409 If no suitable authentication entry is found, the user is prompted
410 for credentials as usual if required by the remote.
410 for credentials as usual if required by the remote.
411
411
412 ``color``
412 ``color``
413 ---------
413 ---------
414
414
415 Configure the Mercurial color mode. For details about how to define your custom
415 Configure the Mercurial color mode. For details about how to define your custom
416 effect and style see :hg:`help color`.
416 effect and style see :hg:`help color`.
417
417
418 ``mode``
418 ``mode``
419 String: control the method used to output color. One of ``auto``, ``ansi``,
419 String: control the method used to output color. One of ``auto``, ``ansi``,
420 ``win32``, ``terminfo`` or ``debug``. In auto mode, Mercurial will
420 ``win32``, ``terminfo`` or ``debug``. In auto mode, Mercurial will
421 use ANSI mode by default (or win32 mode prior to Windows 10) if it detects a
421 use ANSI mode by default (or win32 mode prior to Windows 10) if it detects a
422 terminal. Any invalid value will disable color.
422 terminal. Any invalid value will disable color.
423
423
424 ``pagermode``
424 ``pagermode``
425 String: optional override of ``color.mode`` used with pager.
425 String: optional override of ``color.mode`` used with pager.
426
426
427 On some systems, terminfo mode may cause problems when using
427 On some systems, terminfo mode may cause problems when using
428 color with ``less -R`` as a pager program. less with the -R option
428 color with ``less -R`` as a pager program. less with the -R option
429 will only display ECMA-48 color codes, and terminfo mode may sometimes
429 will only display ECMA-48 color codes, and terminfo mode may sometimes
430 emit codes that less doesn't understand. You can work around this by
430 emit codes that less doesn't understand. You can work around this by
431 either using ansi mode (or auto mode), or by using less -r (which will
431 either using ansi mode (or auto mode), or by using less -r (which will
432 pass through all terminal control codes, not just color control
432 pass through all terminal control codes, not just color control
433 codes).
433 codes).
434
434
435 On some systems (such as MSYS in Windows), the terminal may support
435 On some systems (such as MSYS in Windows), the terminal may support
436 a different color mode than the pager program.
436 a different color mode than the pager program.
437
437
438 ``commands``
438 ``commands``
439 ------------
439 ------------
440
440
441 ``resolve.confirm``
441 ``resolve.confirm``
442 Confirm before performing action if no filename is passed.
442 Confirm before performing action if no filename is passed.
443 (default: False)
443 (default: False)
444
444
445 ``resolve.explicit-re-merge``
445 ``resolve.explicit-re-merge``
446 Require uses of ``hg resolve`` to specify which action it should perform,
446 Require uses of ``hg resolve`` to specify which action it should perform,
447 instead of re-merging files by default.
447 instead of re-merging files by default.
448 (default: False)
448 (default: False)
449
449
450 ``resolve.mark-check``
450 ``resolve.mark-check``
451 Determines what level of checking :hg:`resolve --mark` will perform before
451 Determines what level of checking :hg:`resolve --mark` will perform before
452 marking files as resolved. Valid values are ``none``, ``warn``, and
452 marking files as resolved. Valid values are ``none``, ``warn``, and
453 ``abort``. ``warn`` will output a warning listing the file(s) that still
453 ``abort``. ``warn`` will output a warning listing the file(s) that still
454 have conflict markers in them, but will still mark everything resolved.
454 have conflict markers in them, but will still mark everything resolved.
455 ``abort`` will output the same warning but will not mark things as resolved.
455 ``abort`` will output the same warning but will not mark things as resolved.
456 If --all is passed and this is set to ``abort``, only a warning will be
456 If --all is passed and this is set to ``abort``, only a warning will be
457 shown (an error will not be raised).
457 shown (an error will not be raised).
458 (default: ``none``)
458 (default: ``none``)
459
459
460 ``status.relative``
460 ``status.relative``
461 Make paths in :hg:`status` output relative to the current directory.
461 Make paths in :hg:`status` output relative to the current directory.
462 (default: False)
462 (default: False)
463
463
464 ``status.terse``
464 ``status.terse``
465 Default value for the --terse flag, which condenses status output.
465 Default value for the --terse flag, which condenses status output.
466 (default: empty)
466 (default: empty)
467
467
468 ``update.check``
468 ``update.check``
469 Determines what level of checking :hg:`update` will perform before moving
469 Determines what level of checking :hg:`update` will perform before moving
470 to a destination revision. Valid values are ``abort``, ``none``,
470 to a destination revision. Valid values are ``abort``, ``none``,
471 ``linear``, and ``noconflict``. ``abort`` always fails if the working
471 ``linear``, and ``noconflict``. ``abort`` always fails if the working
472 directory has uncommitted changes. ``none`` performs no checking, and may
472 directory has uncommitted changes. ``none`` performs no checking, and may
473 result in a merge with uncommitted changes. ``linear`` allows any update
473 result in a merge with uncommitted changes. ``linear`` allows any update
474 as long as it follows a straight line in the revision history, and may
474 as long as it follows a straight line in the revision history, and may
475 trigger a merge with uncommitted changes. ``noconflict`` will allow any
475 trigger a merge with uncommitted changes. ``noconflict`` will allow any
476 update which would not trigger a merge with uncommitted changes, if any
476 update which would not trigger a merge with uncommitted changes, if any
477 are present.
477 are present.
478 (default: ``linear``)
478 (default: ``linear``)
479
479
480 ``update.requiredest``
480 ``update.requiredest``
481 Require that the user pass a destination when running :hg:`update`.
481 Require that the user pass a destination when running :hg:`update`.
482 For example, :hg:`update .::` will be allowed, but a plain :hg:`update`
482 For example, :hg:`update .::` will be allowed, but a plain :hg:`update`
483 will be disallowed.
483 will be disallowed.
484 (default: False)
484 (default: False)
485
485
486 ``committemplate``
486 ``committemplate``
487 ------------------
487 ------------------
488
488
489 ``changeset``
489 ``changeset``
490 String: configuration in this section is used as the template to
490 String: configuration in this section is used as the template to
491 customize the text shown in the editor when committing.
491 customize the text shown in the editor when committing.
492
492
493 In addition to pre-defined template keywords, commit log specific one
493 In addition to pre-defined template keywords, commit log specific one
494 below can be used for customization:
494 below can be used for customization:
495
495
496 ``extramsg``
496 ``extramsg``
497 String: Extra message (typically 'Leave message empty to abort
497 String: Extra message (typically 'Leave message empty to abort
498 commit.'). This may be changed by some commands or extensions.
498 commit.'). This may be changed by some commands or extensions.
499
499
500 For example, the template configuration below shows the same text as
500 For example, the template configuration below shows the same text as
501 the one shown by default::
501 the one shown by default::
502
502
503 [committemplate]
503 [committemplate]
504 changeset = {desc}\n\n
504 changeset = {desc}\n\n
505 HG: Enter commit message. Lines beginning with 'HG:' are removed.
505 HG: Enter commit message. Lines beginning with 'HG:' are removed.
506 HG: {extramsg}
506 HG: {extramsg}
507 HG: --
507 HG: --
508 HG: user: {author}\n{ifeq(p2rev, "-1", "",
508 HG: user: {author}\n{ifeq(p2rev, "-1", "",
509 "HG: branch merge\n")
509 "HG: branch merge\n")
510 }HG: branch '{branch}'\n{if(activebookmark,
510 }HG: branch '{branch}'\n{if(activebookmark,
511 "HG: bookmark '{activebookmark}'\n") }{subrepos %
511 "HG: bookmark '{activebookmark}'\n") }{subrepos %
512 "HG: subrepo {subrepo}\n" }{file_adds %
512 "HG: subrepo {subrepo}\n" }{file_adds %
513 "HG: added {file}\n" }{file_mods %
513 "HG: added {file}\n" }{file_mods %
514 "HG: changed {file}\n" }{file_dels %
514 "HG: changed {file}\n" }{file_dels %
515 "HG: removed {file}\n" }{if(files, "",
515 "HG: removed {file}\n" }{if(files, "",
516 "HG: no files changed\n")}
516 "HG: no files changed\n")}
517
517
518 ``diff()``
518 ``diff()``
519 String: show the diff (see :hg:`help templates` for detail)
519 String: show the diff (see :hg:`help templates` for detail)
520
520
521 Sometimes it is helpful to show the diff of the changeset in the editor without
521 Sometimes it is helpful to show the diff of the changeset in the editor without
522 having to prefix 'HG: ' to each line so that highlighting works correctly. For
522 having to prefix 'HG: ' to each line so that highlighting works correctly. For
523 this, Mercurial provides a special string which will ignore everything below
523 this, Mercurial provides a special string which will ignore everything below
524 it::
524 it::
525
525
526 HG: ------------------------ >8 ------------------------
526 HG: ------------------------ >8 ------------------------
527
527
528 For example, the template configuration below will show the diff below the
528 For example, the template configuration below will show the diff below the
529 extra message::
529 extra message::
530
530
531 [committemplate]
531 [committemplate]
532 changeset = {desc}\n\n
532 changeset = {desc}\n\n
533 HG: Enter commit message. Lines beginning with 'HG:' are removed.
533 HG: Enter commit message. Lines beginning with 'HG:' are removed.
534 HG: {extramsg}
534 HG: {extramsg}
535 HG: ------------------------ >8 ------------------------
535 HG: ------------------------ >8 ------------------------
536 HG: Do not touch the line above.
536 HG: Do not touch the line above.
537 HG: Everything below will be removed.
537 HG: Everything below will be removed.
538 {diff()}
538 {diff()}
539
539
540 .. note::
540 .. note::
541
541
542 For some problematic encodings (see :hg:`help win32mbcs` for
542 For some problematic encodings (see :hg:`help win32mbcs` for
543 detail), this customization should be configured carefully, to
543 detail), this customization should be configured carefully, to
544 avoid showing broken characters.
544 avoid showing broken characters.
545
545
546 For example, if a multibyte character ending with backslash (0x5c) is
546 For example, if a multibyte character ending with backslash (0x5c) is
547 followed by the ASCII character 'n' in the customized template,
547 followed by the ASCII character 'n' in the customized template,
548 the sequence of backslash and 'n' is treated as line-feed unexpectedly
548 the sequence of backslash and 'n' is treated as line-feed unexpectedly
549 (and the multibyte character is broken, too).
549 (and the multibyte character is broken, too).
550
550
551 Customized template is used for commands below (``--edit`` may be
551 Customized template is used for commands below (``--edit`` may be
552 required):
552 required):
553
553
554 - :hg:`backout`
554 - :hg:`backout`
555 - :hg:`commit`
555 - :hg:`commit`
556 - :hg:`fetch` (for merge commit only)
556 - :hg:`fetch` (for merge commit only)
557 - :hg:`graft`
557 - :hg:`graft`
558 - :hg:`histedit`
558 - :hg:`histedit`
559 - :hg:`import`
559 - :hg:`import`
560 - :hg:`qfold`, :hg:`qnew` and :hg:`qrefresh`
560 - :hg:`qfold`, :hg:`qnew` and :hg:`qrefresh`
561 - :hg:`rebase`
561 - :hg:`rebase`
562 - :hg:`shelve`
562 - :hg:`shelve`
563 - :hg:`sign`
563 - :hg:`sign`
564 - :hg:`tag`
564 - :hg:`tag`
565 - :hg:`transplant`
565 - :hg:`transplant`
566
566
567 Configuring items below instead of ``changeset`` allows showing
567 Configuring items below instead of ``changeset`` allows showing
568 customized message only for specific actions, or showing different
568 customized message only for specific actions, or showing different
569 messages for each action.
569 messages for each action.
570
570
571 - ``changeset.backout`` for :hg:`backout`
571 - ``changeset.backout`` for :hg:`backout`
572 - ``changeset.commit.amend.merge`` for :hg:`commit --amend` on merges
572 - ``changeset.commit.amend.merge`` for :hg:`commit --amend` on merges
573 - ``changeset.commit.amend.normal`` for :hg:`commit --amend` on other
573 - ``changeset.commit.amend.normal`` for :hg:`commit --amend` on other
574 - ``changeset.commit.normal.merge`` for :hg:`commit` on merges
574 - ``changeset.commit.normal.merge`` for :hg:`commit` on merges
575 - ``changeset.commit.normal.normal`` for :hg:`commit` on other
575 - ``changeset.commit.normal.normal`` for :hg:`commit` on other
576 - ``changeset.fetch`` for :hg:`fetch` (implying merge commit)
576 - ``changeset.fetch`` for :hg:`fetch` (implying merge commit)
577 - ``changeset.gpg.sign`` for :hg:`sign`
577 - ``changeset.gpg.sign`` for :hg:`sign`
578 - ``changeset.graft`` for :hg:`graft`
578 - ``changeset.graft`` for :hg:`graft`
579 - ``changeset.histedit.edit`` for ``edit`` of :hg:`histedit`
579 - ``changeset.histedit.edit`` for ``edit`` of :hg:`histedit`
580 - ``changeset.histedit.fold`` for ``fold`` of :hg:`histedit`
580 - ``changeset.histedit.fold`` for ``fold`` of :hg:`histedit`
581 - ``changeset.histedit.mess`` for ``mess`` of :hg:`histedit`
581 - ``changeset.histedit.mess`` for ``mess`` of :hg:`histedit`
582 - ``changeset.histedit.pick`` for ``pick`` of :hg:`histedit`
582 - ``changeset.histedit.pick`` for ``pick`` of :hg:`histedit`
583 - ``changeset.import.bypass`` for :hg:`import --bypass`
583 - ``changeset.import.bypass`` for :hg:`import --bypass`
584 - ``changeset.import.normal.merge`` for :hg:`import` on merges
584 - ``changeset.import.normal.merge`` for :hg:`import` on merges
585 - ``changeset.import.normal.normal`` for :hg:`import` on other
585 - ``changeset.import.normal.normal`` for :hg:`import` on other
586 - ``changeset.mq.qnew`` for :hg:`qnew`
586 - ``changeset.mq.qnew`` for :hg:`qnew`
587 - ``changeset.mq.qfold`` for :hg:`qfold`
587 - ``changeset.mq.qfold`` for :hg:`qfold`
588 - ``changeset.mq.qrefresh`` for :hg:`qrefresh`
588 - ``changeset.mq.qrefresh`` for :hg:`qrefresh`
589 - ``changeset.rebase.collapse`` for :hg:`rebase --collapse`
589 - ``changeset.rebase.collapse`` for :hg:`rebase --collapse`
590 - ``changeset.rebase.merge`` for :hg:`rebase` on merges
590 - ``changeset.rebase.merge`` for :hg:`rebase` on merges
591 - ``changeset.rebase.normal`` for :hg:`rebase` on other
591 - ``changeset.rebase.normal`` for :hg:`rebase` on other
592 - ``changeset.shelve.shelve`` for :hg:`shelve`
592 - ``changeset.shelve.shelve`` for :hg:`shelve`
593 - ``changeset.tag.add`` for :hg:`tag` without ``--remove``
593 - ``changeset.tag.add`` for :hg:`tag` without ``--remove``
594 - ``changeset.tag.remove`` for :hg:`tag --remove`
594 - ``changeset.tag.remove`` for :hg:`tag --remove`
595 - ``changeset.transplant.merge`` for :hg:`transplant` on merges
595 - ``changeset.transplant.merge`` for :hg:`transplant` on merges
596 - ``changeset.transplant.normal`` for :hg:`transplant` on other
596 - ``changeset.transplant.normal`` for :hg:`transplant` on other
597
597
598 These dot-separated lists of names are treated as hierarchical ones.
598 These dot-separated lists of names are treated as hierarchical ones.
599 For example, ``changeset.tag.remove`` customizes the commit message
599 For example, ``changeset.tag.remove`` customizes the commit message
600 only for :hg:`tag --remove`, but ``changeset.tag`` customizes the
600 only for :hg:`tag --remove`, but ``changeset.tag`` customizes the
601 commit message for :hg:`tag` regardless of ``--remove`` option.
601 commit message for :hg:`tag` regardless of ``--remove`` option.
602
602
603 When the external editor is invoked for a commit, the corresponding
603 When the external editor is invoked for a commit, the corresponding
604 dot-separated list of names without the ``changeset.`` prefix
604 dot-separated list of names without the ``changeset.`` prefix
605 (e.g. ``commit.normal.normal``) is in the ``HGEDITFORM`` environment
605 (e.g. ``commit.normal.normal``) is in the ``HGEDITFORM`` environment
606 variable.
606 variable.
607
607
608 In this section, items other than ``changeset`` can be referred from
608 In this section, items other than ``changeset`` can be referred from
609 others. For example, the configuration to list committed files up
609 others. For example, the configuration to list committed files up
610 below can be referred as ``{listupfiles}``::
610 below can be referred as ``{listupfiles}``::
611
611
612 [committemplate]
612 [committemplate]
613 listupfiles = {file_adds %
613 listupfiles = {file_adds %
614 "HG: added {file}\n" }{file_mods %
614 "HG: added {file}\n" }{file_mods %
615 "HG: changed {file}\n" }{file_dels %
615 "HG: changed {file}\n" }{file_dels %
616 "HG: removed {file}\n" }{if(files, "",
616 "HG: removed {file}\n" }{if(files, "",
617 "HG: no files changed\n")}
617 "HG: no files changed\n")}
618
618
619 ``decode/encode``
619 ``decode/encode``
620 -----------------
620 -----------------
621
621
622 Filters for transforming files on checkout/checkin. This would
622 Filters for transforming files on checkout/checkin. This would
623 typically be used for newline processing or other
623 typically be used for newline processing or other
624 localization/canonicalization of files.
624 localization/canonicalization of files.
625
625
626 Filters consist of a filter pattern followed by a filter command.
626 Filters consist of a filter pattern followed by a filter command.
627 Filter patterns are globs by default, rooted at the repository root.
627 Filter patterns are globs by default, rooted at the repository root.
628 For example, to match any file ending in ``.txt`` in the root
628 For example, to match any file ending in ``.txt`` in the root
629 directory only, use the pattern ``*.txt``. To match any file ending
629 directory only, use the pattern ``*.txt``. To match any file ending
630 in ``.c`` anywhere in the repository, use the pattern ``**.c``.
630 in ``.c`` anywhere in the repository, use the pattern ``**.c``.
631 For each file only the first matching filter applies.
631 For each file only the first matching filter applies.
632
632
633 The filter command can start with a specifier, either ``pipe:`` or
633 The filter command can start with a specifier, either ``pipe:`` or
634 ``tempfile:``. If no specifier is given, ``pipe:`` is used by default.
634 ``tempfile:``. If no specifier is given, ``pipe:`` is used by default.
635
635
636 A ``pipe:`` command must accept data on stdin and return the transformed
636 A ``pipe:`` command must accept data on stdin and return the transformed
637 data on stdout.
637 data on stdout.
638
638
639 Pipe example::
639 Pipe example::
640
640
641 [encode]
641 [encode]
642 # uncompress gzip files on checkin to improve delta compression
642 # uncompress gzip files on checkin to improve delta compression
643 # note: not necessarily a good idea, just an example
643 # note: not necessarily a good idea, just an example
644 *.gz = pipe: gunzip
644 *.gz = pipe: gunzip
645
645
646 [decode]
646 [decode]
647 # recompress gzip files when writing them to the working dir (we
647 # recompress gzip files when writing them to the working dir (we
648 # can safely omit "pipe:", because it's the default)
648 # can safely omit "pipe:", because it's the default)
649 *.gz = gzip
649 *.gz = gzip
650
650
651 A ``tempfile:`` command is a template. The string ``INFILE`` is replaced
651 A ``tempfile:`` command is a template. The string ``INFILE`` is replaced
652 with the name of a temporary file that contains the data to be
652 with the name of a temporary file that contains the data to be
653 filtered by the command. The string ``OUTFILE`` is replaced with the name
653 filtered by the command. The string ``OUTFILE`` is replaced with the name
654 of an empty temporary file, where the filtered data must be written by
654 of an empty temporary file, where the filtered data must be written by
655 the command.
655 the command.
656
656
657 .. container:: windows
657 .. container:: windows
658
658
659 .. note::
659 .. note::
660
660
661 The tempfile mechanism is recommended for Windows systems,
661 The tempfile mechanism is recommended for Windows systems,
662 where the standard shell I/O redirection operators often have
662 where the standard shell I/O redirection operators often have
663 strange effects and may corrupt the contents of your files.
663 strange effects and may corrupt the contents of your files.
664
664
665 This filter mechanism is used internally by the ``eol`` extension to
665 This filter mechanism is used internally by the ``eol`` extension to
666 translate line ending characters between Windows (CRLF) and Unix (LF)
666 translate line ending characters between Windows (CRLF) and Unix (LF)
667 format. We suggest you use the ``eol`` extension for convenience.
667 format. We suggest you use the ``eol`` extension for convenience.
668
668
669
669
670 ``defaults``
670 ``defaults``
671 ------------
671 ------------
672
672
673 (defaults are deprecated. Don't use them. Use aliases instead.)
673 (defaults are deprecated. Don't use them. Use aliases instead.)
674
674
675 Use the ``[defaults]`` section to define command defaults, i.e. the
675 Use the ``[defaults]`` section to define command defaults, i.e. the
676 default options/arguments to pass to the specified commands.
676 default options/arguments to pass to the specified commands.
677
677
678 The following example makes :hg:`log` run in verbose mode, and
678 The following example makes :hg:`log` run in verbose mode, and
679 :hg:`status` show only the modified files, by default::
679 :hg:`status` show only the modified files, by default::
680
680
681 [defaults]
681 [defaults]
682 log = -v
682 log = -v
683 status = -m
683 status = -m
684
684
685 The actual commands, instead of their aliases, must be used when
685 The actual commands, instead of their aliases, must be used when
686 defining command defaults. The command defaults will also be applied
686 defining command defaults. The command defaults will also be applied
687 to the aliases of the commands defined.
687 to the aliases of the commands defined.
688
688
689
689
690 ``diff``
690 ``diff``
691 --------
691 --------
692
692
693 Settings used when displaying diffs. Everything except for ``unified``
693 Settings used when displaying diffs. Everything except for ``unified``
694 is a Boolean and defaults to False. See :hg:`help config.annotate`
694 is a Boolean and defaults to False. See :hg:`help config.annotate`
695 for related options for the annotate command.
695 for related options for the annotate command.
696
696
697 ``git``
697 ``git``
698 Use git extended diff format.
698 Use git extended diff format.
699
699
700 ``nobinary``
700 ``nobinary``
701 Omit git binary patches.
701 Omit git binary patches.
702
702
703 ``nodates``
703 ``nodates``
704 Don't include dates in diff headers.
704 Don't include dates in diff headers.
705
705
706 ``noprefix``
706 ``noprefix``
707 Omit 'a/' and 'b/' prefixes from filenames. Ignored in plain mode.
707 Omit 'a/' and 'b/' prefixes from filenames. Ignored in plain mode.
708
708
709 ``showfunc``
709 ``showfunc``
710 Show which function each change is in.
710 Show which function each change is in.
711
711
712 ``ignorews``
712 ``ignorews``
713 Ignore white space when comparing lines.
713 Ignore white space when comparing lines.
714
714
715 ``ignorewsamount``
715 ``ignorewsamount``
716 Ignore changes in the amount of white space.
716 Ignore changes in the amount of white space.
717
717
718 ``ignoreblanklines``
718 ``ignoreblanklines``
719 Ignore changes whose lines are all blank.
719 Ignore changes whose lines are all blank.
720
720
721 ``unified``
721 ``unified``
722 Number of lines of context to show.
722 Number of lines of context to show.
723
723
724 ``word-diff``
724 ``word-diff``
725 Highlight changed words.
725 Highlight changed words.
726
726
727 ``email``
727 ``email``
728 ---------
728 ---------
729
729
730 Settings for extensions that send email messages.
730 Settings for extensions that send email messages.
731
731
732 ``from``
732 ``from``
733 Optional. Email address to use in "From" header and SMTP envelope
733 Optional. Email address to use in "From" header and SMTP envelope
734 of outgoing messages.
734 of outgoing messages.
735
735
736 ``to``
736 ``to``
737 Optional. Comma-separated list of recipients' email addresses.
737 Optional. Comma-separated list of recipients' email addresses.
738
738
739 ``cc``
739 ``cc``
740 Optional. Comma-separated list of carbon copy recipients'
740 Optional. Comma-separated list of carbon copy recipients'
741 email addresses.
741 email addresses.
742
742
743 ``bcc``
743 ``bcc``
744 Optional. Comma-separated list of blind carbon copy recipients'
744 Optional. Comma-separated list of blind carbon copy recipients'
745 email addresses.
745 email addresses.
746
746
747 ``method``
747 ``method``
748 Optional. Method to use to send email messages. If value is ``smtp``
748 Optional. Method to use to send email messages. If value is ``smtp``
749 (default), use SMTP (see the ``[smtp]`` section for configuration).
749 (default), use SMTP (see the ``[smtp]`` section for configuration).
750 Otherwise, use as name of program to run that acts like sendmail
750 Otherwise, use as name of program to run that acts like sendmail
751 (takes ``-f`` option for sender, list of recipients on command line,
751 (takes ``-f`` option for sender, list of recipients on command line,
752 message on stdin). Normally, setting this to ``sendmail`` or
752 message on stdin). Normally, setting this to ``sendmail`` or
753 ``/usr/sbin/sendmail`` is enough to use sendmail to send messages.
753 ``/usr/sbin/sendmail`` is enough to use sendmail to send messages.
754
754
755 ``charsets``
755 ``charsets``
756 Optional. Comma-separated list of character sets considered
756 Optional. Comma-separated list of character sets considered
757 convenient for recipients. Addresses, headers, and parts not
757 convenient for recipients. Addresses, headers, and parts not
758 containing patches of outgoing messages will be encoded in the
758 containing patches of outgoing messages will be encoded in the
759 first character set to which conversion from local encoding
759 first character set to which conversion from local encoding
760 (``$HGENCODING``, ``ui.fallbackencoding``) succeeds. If correct
760 (``$HGENCODING``, ``ui.fallbackencoding``) succeeds. If correct
761 conversion fails, the text in question is sent as is.
761 conversion fails, the text in question is sent as is.
762 (default: '')
762 (default: '')
763
763
764 Order of outgoing email character sets:
764 Order of outgoing email character sets:
765
765
766 1. ``us-ascii``: always first, regardless of settings
766 1. ``us-ascii``: always first, regardless of settings
767 2. ``email.charsets``: in order given by user
767 2. ``email.charsets``: in order given by user
768 3. ``ui.fallbackencoding``: if not in email.charsets
768 3. ``ui.fallbackencoding``: if not in email.charsets
769 4. ``$HGENCODING``: if not in email.charsets
769 4. ``$HGENCODING``: if not in email.charsets
770 5. ``utf-8``: always last, regardless of settings
770 5. ``utf-8``: always last, regardless of settings
771
771
772 Email example::
772 Email example::
773
773
774 [email]
774 [email]
775 from = Joseph User <joe.user@example.com>
775 from = Joseph User <joe.user@example.com>
776 method = /usr/sbin/sendmail
776 method = /usr/sbin/sendmail
777 # charsets for western Europeans
777 # charsets for western Europeans
778 # us-ascii, utf-8 omitted, as they are tried first and last
778 # us-ascii, utf-8 omitted, as they are tried first and last
779 charsets = iso-8859-1, iso-8859-15, windows-1252
779 charsets = iso-8859-1, iso-8859-15, windows-1252
780
780
781
781
782 ``extensions``
782 ``extensions``
783 --------------
783 --------------
784
784
785 Mercurial has an extension mechanism for adding new features. To
785 Mercurial has an extension mechanism for adding new features. To
786 enable an extension, create an entry for it in this section.
786 enable an extension, create an entry for it in this section.
787
787
788 If you know that the extension is already in Python's search path,
788 If you know that the extension is already in Python's search path,
789 you can give the name of the module, followed by ``=``, with nothing
789 you can give the name of the module, followed by ``=``, with nothing
790 after the ``=``.
790 after the ``=``.
791
791
792 Otherwise, give a name that you choose, followed by ``=``, followed by
792 Otherwise, give a name that you choose, followed by ``=``, followed by
793 the path to the ``.py`` file (including the file name extension) that
793 the path to the ``.py`` file (including the file name extension) that
794 defines the extension.
794 defines the extension.
795
795
796 To explicitly disable an extension that is enabled in an hgrc of
796 To explicitly disable an extension that is enabled in an hgrc of
797 broader scope, prepend its path with ``!``, as in ``foo = !/ext/path``
797 broader scope, prepend its path with ``!``, as in ``foo = !/ext/path``
798 or ``foo = !`` when path is not supplied.
798 or ``foo = !`` when path is not supplied.
799
799
800 Example for ``~/.hgrc``::
800 Example for ``~/.hgrc``::
801
801
802 [extensions]
802 [extensions]
803 # (the churn extension will get loaded from Mercurial's path)
803 # (the churn extension will get loaded from Mercurial's path)
804 churn =
804 churn =
805 # (this extension will get loaded from the file specified)
805 # (this extension will get loaded from the file specified)
806 myfeature = ~/.hgext/myfeature.py
806 myfeature = ~/.hgext/myfeature.py
807
807
808
808
809 ``format``
809 ``format``
810 ----------
810 ----------
811
811
812 Configuration that controls the repository format. Newer format options are more
812 Configuration that controls the repository format. Newer format options are more
813 powerful but incompatible with some older versions of Mercurial. Format options
813 powerful but incompatible with some older versions of Mercurial. Format options
814 are considered at repository initialization only. You need to make a new clone
814 are considered at repository initialization only. You need to make a new clone
815 for config change to be taken into account.
815 for config change to be taken into account.
816
816
817 For more details about repository format and version compatibility, see
817 For more details about repository format and version compatibility, see
818 https://www.mercurial-scm.org/wiki/MissingRequirement
818 https://www.mercurial-scm.org/wiki/MissingRequirement
819
819
820 ``usegeneraldelta``
820 ``usegeneraldelta``
821 Enable or disable the "generaldelta" repository format which improves
821 Enable or disable the "generaldelta" repository format which improves
822 repository compression by allowing "revlog" to store delta against arbitrary
822 repository compression by allowing "revlog" to store delta against arbitrary
823 revision instead of the previous stored one. This provides significant
823 revision instead of the previous stored one. This provides significant
824 improvement for repositories with branches.
824 improvement for repositories with branches.
825
825
826 Repositories with this on-disk format require Mercurial version 1.9.
826 Repositories with this on-disk format require Mercurial version 1.9.
827
827
828 Enabled by default.
828 Enabled by default.
829
829
830 ``dotencode``
830 ``dotencode``
831 Enable or disable the "dotencode" repository format which enhances
831 Enable or disable the "dotencode" repository format which enhances
832 the "fncache" repository format (which has to be enabled to use
832 the "fncache" repository format (which has to be enabled to use
833 dotencode) to avoid issues with filenames starting with ._ on
833 dotencode) to avoid issues with filenames starting with ._ on
834 Mac OS X and spaces on Windows.
834 Mac OS X and spaces on Windows.
835
835
836 Repositories with this on-disk format require Mercurial version 1.7.
836 Repositories with this on-disk format require Mercurial version 1.7.
837
837
838 Enabled by default.
838 Enabled by default.
839
839
840 ``usefncache``
840 ``usefncache``
841 Enable or disable the "fncache" repository format which enhances
841 Enable or disable the "fncache" repository format which enhances
842 the "store" repository format (which has to be enabled to use
842 the "store" repository format (which has to be enabled to use
843 fncache) to allow longer filenames and avoids using Windows
843 fncache) to allow longer filenames and avoids using Windows
844 reserved names, e.g. "nul".
844 reserved names, e.g. "nul".
845
845
846 Repositories with this on-disk format require Mercurial version 1.1.
846 Repositories with this on-disk format require Mercurial version 1.1.
847
847
848 Enabled by default.
848 Enabled by default.
849
849
850 ``usestore``
850 ``usestore``
851 Enable or disable the "store" repository format which improves
851 Enable or disable the "store" repository format which improves
852 compatibility with systems that fold case or otherwise mangle
852 compatibility with systems that fold case or otherwise mangle
853 filenames. Disabling this option will allow you to store longer filenames
853 filenames. Disabling this option will allow you to store longer filenames
854 in some situations at the expense of compatibility.
854 in some situations at the expense of compatibility.
855
855
856 Repositories with this on-disk format require Mercurial version 0.9.4.
856 Repositories with this on-disk format require Mercurial version 0.9.4.
857
857
858 Enabled by default.
858 Enabled by default.
859
859
860 ``sparse-revlog``
860 ``sparse-revlog``
861 Enable or disable the ``sparse-revlog`` delta strategy. This format improves
861 Enable or disable the ``sparse-revlog`` delta strategy. This format improves
862 delta re-use inside revlog. For very branchy repositories, it results in a
862 delta re-use inside revlog. For very branchy repositories, it results in a
863 smaller store. For repositories with many revisions, it also helps
863 smaller store. For repositories with many revisions, it also helps
864 performance (by using shortened delta chains.)
864 performance (by using shortened delta chains.)
865
865
866 Repositories with this on-disk format require Mercurial version 4.7
866 Repositories with this on-disk format require Mercurial version 4.7
867
867
868 Enabled by default.
868 Enabled by default.
869
869
870 ``graph``
870 ``graph``
871 ---------
871 ---------
872
872
873 Web graph view configuration. This section lets you change graph
873 Web graph view configuration. This section lets you change graph
874 elements display properties by branches, for instance to make the
874 elements display properties by branches, for instance to make the
875 ``default`` branch stand out.
875 ``default`` branch stand out.
876
876
877 Each line has the following format::
877 Each line has the following format::
878
878
879 <branch>.<argument> = <value>
879 <branch>.<argument> = <value>
880
880
881 where ``<branch>`` is the name of the branch being
881 where ``<branch>`` is the name of the branch being
882 customized. Example::
882 customized. Example::
883
883
884 [graph]
884 [graph]
885 # 2px width
885 # 2px width
886 default.width = 2
886 default.width = 2
887 # red color
887 # red color
888 default.color = FF0000
888 default.color = FF0000
889
889
890 Supported arguments:
890 Supported arguments:
891
891
892 ``width``
892 ``width``
893 Set branch edges width in pixels.
893 Set branch edges width in pixels.
894
894
895 ``color``
895 ``color``
896 Set branch edges color in hexadecimal RGB notation.
896 Set branch edges color in hexadecimal RGB notation.
897
897
898 ``hooks``
898 ``hooks``
899 ---------
899 ---------
900
900
901 Commands or Python functions that get automatically executed by
901 Commands or Python functions that get automatically executed by
902 various actions such as starting or finishing a commit. Multiple
902 various actions such as starting or finishing a commit. Multiple
903 hooks can be run for the same action by appending a suffix to the
903 hooks can be run for the same action by appending a suffix to the
904 action. Overriding a site-wide hook can be done by changing its
904 action. Overriding a site-wide hook can be done by changing its
905 value or setting it to an empty string. Hooks can be prioritized
905 value or setting it to an empty string. Hooks can be prioritized
906 by adding a prefix of ``priority.`` to the hook name on a new line
906 by adding a prefix of ``priority.`` to the hook name on a new line
907 and setting the priority. The default priority is 0.
907 and setting the priority. The default priority is 0.
908
908
909 Example ``.hg/hgrc``::
909 Example ``.hg/hgrc``::
910
910
911 [hooks]
911 [hooks]
912 # update working directory after adding changesets
912 # update working directory after adding changesets
913 changegroup.update = hg update
913 changegroup.update = hg update
914 # do not use the site-wide hook
914 # do not use the site-wide hook
915 incoming =
915 incoming =
916 incoming.email = /my/email/hook
916 incoming.email = /my/email/hook
917 incoming.autobuild = /my/build/hook
917 incoming.autobuild = /my/build/hook
918 # force autobuild hook to run before other incoming hooks
918 # force autobuild hook to run before other incoming hooks
919 priority.incoming.autobuild = 1
919 priority.incoming.autobuild = 1
920
920
921 Most hooks are run with environment variables set that give useful
921 Most hooks are run with environment variables set that give useful
922 additional information. For each hook below, the environment variables
922 additional information. For each hook below, the environment variables
923 it is passed are listed with names in the form ``$HG_foo``. The
923 it is passed are listed with names in the form ``$HG_foo``. The
924 ``$HG_HOOKTYPE`` and ``$HG_HOOKNAME`` variables are set for all hooks.
924 ``$HG_HOOKTYPE`` and ``$HG_HOOKNAME`` variables are set for all hooks.
925 They contain the type of hook which triggered the run and the full name
925 They contain the type of hook which triggered the run and the full name
926 of the hook in the config, respectively. In the example above, this will
926 of the hook in the config, respectively. In the example above, this will
927 be ``$HG_HOOKTYPE=incoming`` and ``$HG_HOOKNAME=incoming.email``.
927 be ``$HG_HOOKTYPE=incoming`` and ``$HG_HOOKNAME=incoming.email``.
928
928
929 .. container:: windows
929 .. container:: windows
930
930
931 Some basic Unix syntax can be enabled for portability, including ``$VAR``
931 Some basic Unix syntax can be enabled for portability, including ``$VAR``
932 and ``${VAR}`` style variables. A ``~`` followed by ``\`` or ``/`` will
932 and ``${VAR}`` style variables. A ``~`` followed by ``\`` or ``/`` will
933 be expanded to ``%USERPROFILE%`` to simulate a subset of tilde expansion
933 be expanded to ``%USERPROFILE%`` to simulate a subset of tilde expansion
934 on Unix. To use a literal ``$`` or ``~``, it must be escaped with a back
934 on Unix. To use a literal ``$`` or ``~``, it must be escaped with a back
935 slash or inside of a strong quote. Strong quotes will be replaced by
935 slash or inside of a strong quote. Strong quotes will be replaced by
936 double quotes after processing.
936 double quotes after processing.
937
937
938 This feature is enabled by adding a prefix of ``tonative.`` to the hook
938 This feature is enabled by adding a prefix of ``tonative.`` to the hook
939 name on a new line, and setting it to ``True``. For example::
939 name on a new line, and setting it to ``True``. For example::
940
940
941 [hooks]
941 [hooks]
942 incoming.autobuild = /my/build/hook
942 incoming.autobuild = /my/build/hook
943 # enable translation to cmd.exe syntax for autobuild hook
943 # enable translation to cmd.exe syntax for autobuild hook
944 tonative.incoming.autobuild = True
944 tonative.incoming.autobuild = True
945
945
946 ``changegroup``
946 ``changegroup``
947 Run after a changegroup has been added via push, pull or unbundle. The ID of
947 Run after a changegroup has been added via push, pull or unbundle. The ID of
948 the first new changeset is in ``$HG_NODE`` and last is in ``$HG_NODE_LAST``.
948 the first new changeset is in ``$HG_NODE`` and last is in ``$HG_NODE_LAST``.
949 The URL from which changes came is in ``$HG_URL``.
949 The URL from which changes came is in ``$HG_URL``.
950
950
951 ``commit``
951 ``commit``
952 Run after a changeset has been created in the local repository. The ID
952 Run after a changeset has been created in the local repository. The ID
953 of the newly created changeset is in ``$HG_NODE``. Parent changeset
953 of the newly created changeset is in ``$HG_NODE``. Parent changeset
954 IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
954 IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
955
955
956 ``incoming``
956 ``incoming``
957 Run after a changeset has been pulled, pushed, or unbundled into
957 Run after a changeset has been pulled, pushed, or unbundled into
958 the local repository. The ID of the newly arrived changeset is in
958 the local repository. The ID of the newly arrived changeset is in
959 ``$HG_NODE``. The URL that was source of the changes is in ``$HG_URL``.
959 ``$HG_NODE``. The URL that was source of the changes is in ``$HG_URL``.
960
960
961 ``outgoing``
961 ``outgoing``
962 Run after sending changes from the local repository to another. The ID of
962 Run after sending changes from the local repository to another. The ID of
963 first changeset sent is in ``$HG_NODE``. The source of operation is in
963 first changeset sent is in ``$HG_NODE``. The source of operation is in
964 ``$HG_SOURCE``. Also see :hg:`help config.hooks.preoutgoing`.
964 ``$HG_SOURCE``. Also see :hg:`help config.hooks.preoutgoing`.
965
965
966 ``post-<command>``
966 ``post-<command>``
967 Run after successful invocations of the associated command. The
967 Run after successful invocations of the associated command. The
968 contents of the command line are passed as ``$HG_ARGS`` and the result
968 contents of the command line are passed as ``$HG_ARGS`` and the result
969 code in ``$HG_RESULT``. Parsed command line arguments are passed as
969 code in ``$HG_RESULT``. Parsed command line arguments are passed as
970 ``$HG_PATS`` and ``$HG_OPTS``. These contain string representations of
970 ``$HG_PATS`` and ``$HG_OPTS``. These contain string representations of
971 the python data internally passed to <command>. ``$HG_OPTS`` is a
971 the python data internally passed to <command>. ``$HG_OPTS`` is a
972 dictionary of options (with unspecified options set to their defaults).
972 dictionary of options (with unspecified options set to their defaults).
973 ``$HG_PATS`` is a list of arguments. Hook failure is ignored.
973 ``$HG_PATS`` is a list of arguments. Hook failure is ignored.
974
974
975 ``fail-<command>``
975 ``fail-<command>``
976 Run after a failed invocation of an associated command. The contents
976 Run after a failed invocation of an associated command. The contents
977 of the command line are passed as ``$HG_ARGS``. Parsed command line
977 of the command line are passed as ``$HG_ARGS``. Parsed command line
978 arguments are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain
978 arguments are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain
979 string representations of the python data internally passed to
979 string representations of the python data internally passed to
980 <command>. ``$HG_OPTS`` is a dictionary of options (with unspecified
980 <command>. ``$HG_OPTS`` is a dictionary of options (with unspecified
981 options set to their defaults). ``$HG_PATS`` is a list of arguments.
981 options set to their defaults). ``$HG_PATS`` is a list of arguments.
982 Hook failure is ignored.
982 Hook failure is ignored.
983
983
984 ``pre-<command>``
984 ``pre-<command>``
985 Run before executing the associated command. The contents of the
985 Run before executing the associated command. The contents of the
986 command line are passed as ``$HG_ARGS``. Parsed command line arguments
986 command line are passed as ``$HG_ARGS``. Parsed command line arguments
987 are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain string
987 are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain string
988 representations of the data internally passed to <command>. ``$HG_OPTS``
988 representations of the data internally passed to <command>. ``$HG_OPTS``
989 is a dictionary of options (with unspecified options set to their
989 is a dictionary of options (with unspecified options set to their
990 defaults). ``$HG_PATS`` is a list of arguments. If the hook returns
990 defaults). ``$HG_PATS`` is a list of arguments. If the hook returns
991 failure, the command doesn't execute and Mercurial returns the failure
991 failure, the command doesn't execute and Mercurial returns the failure
992 code.
992 code.
993
993
994 ``prechangegroup``
994 ``prechangegroup``
995 Run before a changegroup is added via push, pull or unbundle. Exit
995 Run before a changegroup is added via push, pull or unbundle. Exit
996 status 0 allows the changegroup to proceed. A non-zero status will
996 status 0 allows the changegroup to proceed. A non-zero status will
997 cause the push, pull or unbundle to fail. The URL from which changes
997 cause the push, pull or unbundle to fail. The URL from which changes
998 will come is in ``$HG_URL``.
998 will come is in ``$HG_URL``.
999
999
1000 ``precommit``
1000 ``precommit``
1001 Run before starting a local commit. Exit status 0 allows the
1001 Run before starting a local commit. Exit status 0 allows the
1002 commit to proceed. A non-zero status will cause the commit to fail.
1002 commit to proceed. A non-zero status will cause the commit to fail.
1003 Parent changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1003 Parent changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1004
1004
1005 ``prelistkeys``
1005 ``prelistkeys``
1006 Run before listing pushkeys (like bookmarks) in the
1006 Run before listing pushkeys (like bookmarks) in the
1007 repository. A non-zero status will cause failure. The key namespace is
1007 repository. A non-zero status will cause failure. The key namespace is
1008 in ``$HG_NAMESPACE``.
1008 in ``$HG_NAMESPACE``.
1009
1009
1010 ``preoutgoing``
1010 ``preoutgoing``
1011 Run before collecting changes to send from the local repository to
1011 Run before collecting changes to send from the local repository to
1012 another. A non-zero status will cause failure. This lets you prevent
1012 another. A non-zero status will cause failure. This lets you prevent
1013 pull over HTTP or SSH. It can also prevent propagating commits (via
1013 pull over HTTP or SSH. It can also prevent propagating commits (via
1014 local pull, push (outbound) or bundle commands), but not completely,
1014 local pull, push (outbound) or bundle commands), but not completely,
1015 since you can just copy files instead. The source of operation is in
1015 since you can just copy files instead. The source of operation is in
1016 ``$HG_SOURCE``. If "serve", the operation is happening on behalf of a remote
1016 ``$HG_SOURCE``. If "serve", the operation is happening on behalf of a remote
1017 SSH or HTTP repository. If "push", "pull" or "bundle", the operation
1017 SSH or HTTP repository. If "push", "pull" or "bundle", the operation
1018 is happening on behalf of a repository on same system.
1018 is happening on behalf of a repository on same system.
1019
1019
1020 ``prepushkey``
1020 ``prepushkey``
1021 Run before a pushkey (like a bookmark) is added to the
1021 Run before a pushkey (like a bookmark) is added to the
1022 repository. A non-zero status will cause the key to be rejected. The
1022 repository. A non-zero status will cause the key to be rejected. The
1023 key namespace is in ``$HG_NAMESPACE``, the key is in ``$HG_KEY``,
1023 key namespace is in ``$HG_NAMESPACE``, the key is in ``$HG_KEY``,
1024 the old value (if any) is in ``$HG_OLD``, and the new value is in
1024 the old value (if any) is in ``$HG_OLD``, and the new value is in
1025 ``$HG_NEW``.
1025 ``$HG_NEW``.
1026
1026
1027 ``pretag``
1027 ``pretag``
1028 Run before creating a tag. Exit status 0 allows the tag to be
1028 Run before creating a tag. Exit status 0 allows the tag to be
1029 created. A non-zero status will cause the tag to fail. The ID of the
1029 created. A non-zero status will cause the tag to fail. The ID of the
1030 changeset to tag is in ``$HG_NODE``. The name of tag is in ``$HG_TAG``. The
1030 changeset to tag is in ``$HG_NODE``. The name of tag is in ``$HG_TAG``. The
1031 tag is local if ``$HG_LOCAL=1``, or in the repository if ``$HG_LOCAL=0``.
1031 tag is local if ``$HG_LOCAL=1``, or in the repository if ``$HG_LOCAL=0``.
1032
1032
1033 ``pretxnopen``
1033 ``pretxnopen``
1034 Run before any new repository transaction is open. The reason for the
1034 Run before any new repository transaction is open. The reason for the
1035 transaction will be in ``$HG_TXNNAME``, and a unique identifier for the
1035 transaction will be in ``$HG_TXNNAME``, and a unique identifier for the
1036 transaction will be in ``HG_TXNID``. A non-zero status will prevent the
1036 transaction will be in ``HG_TXNID``. A non-zero status will prevent the
1037 transaction from being opened.
1037 transaction from being opened.
1038
1038
1039 ``pretxnclose``
1039 ``pretxnclose``
1040 Run right before the transaction is actually finalized. Any repository change
1040 Run right before the transaction is actually finalized. Any repository change
1041 will be visible to the hook program. This lets you validate the transaction
1041 will be visible to the hook program. This lets you validate the transaction
1042 content or change it. Exit status 0 allows the commit to proceed. A non-zero
1042 content or change it. Exit status 0 allows the commit to proceed. A non-zero
1043 status will cause the transaction to be rolled back. The reason for the
1043 status will cause the transaction to be rolled back. The reason for the
1044 transaction opening will be in ``$HG_TXNNAME``, and a unique identifier for
1044 transaction opening will be in ``$HG_TXNNAME``, and a unique identifier for
1045 the transaction will be in ``HG_TXNID``. The rest of the available data will
1045 the transaction will be in ``HG_TXNID``. The rest of the available data will
1046 vary according to the transaction type. New changesets will add ``$HG_NODE``
1046 vary according to the transaction type. New changesets will add ``$HG_NODE``
1047 (the ID of the first added changeset), ``$HG_NODE_LAST`` (the ID of the last
1047 (the ID of the first added changeset), ``$HG_NODE_LAST`` (the ID of the last
1048 added changeset), ``$HG_URL`` and ``$HG_SOURCE`` variables. Bookmark and
1048 added changeset), ``$HG_URL`` and ``$HG_SOURCE`` variables. Bookmark and
1049 phase changes will set ``HG_BOOKMARK_MOVED`` and ``HG_PHASES_MOVED`` to ``1``
1049 phase changes will set ``HG_BOOKMARK_MOVED`` and ``HG_PHASES_MOVED`` to ``1``
1050 respectively, etc.
1050 respectively, etc.
1051
1051
1052 ``pretxnclose-bookmark``
1052 ``pretxnclose-bookmark``
1053 Run right before a bookmark change is actually finalized. Any repository
1053 Run right before a bookmark change is actually finalized. Any repository
1054 change will be visible to the hook program. This lets you validate the
1054 change will be visible to the hook program. This lets you validate the
1055 transaction content or change it. Exit status 0 allows the commit to
1055 transaction content or change it. Exit status 0 allows the commit to
1056 proceed. A non-zero status will cause the transaction to be rolled back.
1056 proceed. A non-zero status will cause the transaction to be rolled back.
1057 The name of the bookmark will be available in ``$HG_BOOKMARK``, the new
1057 The name of the bookmark will be available in ``$HG_BOOKMARK``, the new
1058 bookmark location will be available in ``$HG_NODE`` while the previous
1058 bookmark location will be available in ``$HG_NODE`` while the previous
1059 location will be available in ``$HG_OLDNODE``. In case of a bookmark
1059 location will be available in ``$HG_OLDNODE``. In case of a bookmark
1060 creation ``$HG_OLDNODE`` will be empty. In case of deletion ``$HG_NODE``
1060 creation ``$HG_OLDNODE`` will be empty. In case of deletion ``$HG_NODE``
1061 will be empty.
1061 will be empty.
1062 In addition, the reason for the transaction opening will be in
1062 In addition, the reason for the transaction opening will be in
1063 ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
1063 ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
1064 ``HG_TXNID``.
1064 ``HG_TXNID``.
1065
1065
1066 ``pretxnclose-phase``
1066 ``pretxnclose-phase``
1067 Run right before a phase change is actually finalized. Any repository change
1067 Run right before a phase change is actually finalized. Any repository change
1068 will be visible to the hook program. This lets you validate the transaction
1068 will be visible to the hook program. This lets you validate the transaction
1069 content or change it. Exit status 0 allows the commit to proceed. A non-zero
1069 content or change it. Exit status 0 allows the commit to proceed. A non-zero
1070 status will cause the transaction to be rolled back. The hook is called
1070 status will cause the transaction to be rolled back. The hook is called
1071 multiple times, once for each revision affected by a phase change.
1071 multiple times, once for each revision affected by a phase change.
1072 The affected node is available in ``$HG_NODE``, the phase in ``$HG_PHASE``
1072 The affected node is available in ``$HG_NODE``, the phase in ``$HG_PHASE``
1073 while the previous one is in ``$HG_OLDPHASE``. In case of a new node, ``$HG_OLDPHASE``
1073 while the previous one is in ``$HG_OLDPHASE``. In case of a new node, ``$HG_OLDPHASE``
1074 will be empty. In addition, the reason for the transaction opening will be in
1074 will be empty. In addition, the reason for the transaction opening will be in
1075 ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
1075 ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
1076 ``$HG_TXNID``. The hook is also run for newly added revisions. In this case
1076 ``$HG_TXNID``. The hook is also run for newly added revisions. In this case
1077 the ``$HG_OLDPHASE`` entry will be empty.
1077 the ``$HG_OLDPHASE`` entry will be empty.
1078
1078
1079 ``txnclose``
1079 ``txnclose``
1080 Run after any repository transaction has been committed. At this
1080 Run after any repository transaction has been committed. At this
1081 point, the transaction can no longer be rolled back. The hook will run
1081 point, the transaction can no longer be rolled back. The hook will run
1082 after the lock is released. See :hg:`help config.hooks.pretxnclose` for
1082 after the lock is released. See :hg:`help config.hooks.pretxnclose` for
1083 details about available variables.
1083 details about available variables.
1084
1084
1085 ``txnclose-bookmark``
1085 ``txnclose-bookmark``
1086 Run after any bookmark change has been committed. At this point, the
1086 Run after any bookmark change has been committed. At this point, the
1087 transaction can no longer be rolled back. The hook will run after the lock
1087 transaction can no longer be rolled back. The hook will run after the lock
1088 is released. See :hg:`help config.hooks.pretxnclose-bookmark` for details
1088 is released. See :hg:`help config.hooks.pretxnclose-bookmark` for details
1089 about available variables.
1089 about available variables.
1090
1090
1091 ``txnclose-phase``
1091 ``txnclose-phase``
1092 Run after any phase change has been committed. At this point, the
1092 Run after any phase change has been committed. At this point, the
1093 transaction can no longer be rolled back. The hook will run after the lock
1093 transaction can no longer be rolled back. The hook will run after the lock
1094 is released. See :hg:`help config.hooks.pretxnclose-phase` for details about
1094 is released. See :hg:`help config.hooks.pretxnclose-phase` for details about
1095 available variables.
1095 available variables.
1096
1096
1097 ``txnabort``
1097 ``txnabort``
1098 Run when a transaction is aborted. See :hg:`help config.hooks.pretxnclose`
1098 Run when a transaction is aborted. See :hg:`help config.hooks.pretxnclose`
1099 for details about available variables.
1099 for details about available variables.
1100
1100
1101 ``pretxnchangegroup``
1101 ``pretxnchangegroup``
1102 Run after a changegroup has been added via push, pull or unbundle, but before
1102 Run after a changegroup has been added via push, pull or unbundle, but before
1103 the transaction has been committed. The changegroup is visible to the hook
1103 the transaction has been committed. The changegroup is visible to the hook
1104 program. This allows validation of incoming changes before accepting them.
1104 program. This allows validation of incoming changes before accepting them.
1105 The ID of the first new changeset is in ``$HG_NODE`` and last is in
1105 The ID of the first new changeset is in ``$HG_NODE`` and last is in
1106 ``$HG_NODE_LAST``. Exit status 0 allows the transaction to commit. A non-zero
1106 ``$HG_NODE_LAST``. Exit status 0 allows the transaction to commit. A non-zero
1107 status will cause the transaction to be rolled back, and the push, pull or
1107 status will cause the transaction to be rolled back, and the push, pull or
1108 unbundle will fail. The URL that was the source of changes is in ``$HG_URL``.
1108 unbundle will fail. The URL that was the source of changes is in ``$HG_URL``.
1109
1109
1110 ``pretxncommit``
1110 ``pretxncommit``
1111 Run after a changeset has been created, but before the transaction is
1111 Run after a changeset has been created, but before the transaction is
1112 committed. The changeset is visible to the hook program. This allows
1112 committed. The changeset is visible to the hook program. This allows
1113 validation of the commit message and changes. Exit status 0 allows the
1113 validation of the commit message and changes. Exit status 0 allows the
1114 commit to proceed. A non-zero status will cause the transaction to
1114 commit to proceed. A non-zero status will cause the transaction to
1115 be rolled back. The ID of the new changeset is in ``$HG_NODE``. The parent
1115 be rolled back. The ID of the new changeset is in ``$HG_NODE``. The parent
1116 changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1116 changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1117
1117
1118 ``preupdate``
1118 ``preupdate``
1119 Run before updating the working directory. Exit status 0 allows
1119 Run before updating the working directory. Exit status 0 allows
1120 the update to proceed. A non-zero status will prevent the update.
1120 the update to proceed. A non-zero status will prevent the update.
1121 The changeset ID of first new parent is in ``$HG_PARENT1``. If updating to a
1121 The changeset ID of first new parent is in ``$HG_PARENT1``. If updating to a
1122 merge, the ID of second new parent is in ``$HG_PARENT2``.
1122 merge, the ID of second new parent is in ``$HG_PARENT2``.
1123
1123
1124 ``listkeys``
1124 ``listkeys``
1125 Run after listing pushkeys (like bookmarks) in the repository. The
1125 Run after listing pushkeys (like bookmarks) in the repository. The
1126 key namespace is in ``$HG_NAMESPACE``. ``$HG_VALUES`` is a
1126 key namespace is in ``$HG_NAMESPACE``. ``$HG_VALUES`` is a
1127 dictionary containing the keys and values.
1127 dictionary containing the keys and values.
1128
1128
1129 ``pushkey``
1129 ``pushkey``
1130 Run after a pushkey (like a bookmark) is added to the
1130 Run after a pushkey (like a bookmark) is added to the
1131 repository. The key namespace is in ``$HG_NAMESPACE``, the key is in
1131 repository. The key namespace is in ``$HG_NAMESPACE``, the key is in
1132 ``$HG_KEY``, the old value (if any) is in ``$HG_OLD``, and the new
1132 ``$HG_KEY``, the old value (if any) is in ``$HG_OLD``, and the new
1133 value is in ``$HG_NEW``.
1133 value is in ``$HG_NEW``.
1134
1134
1135 ``tag``
1135 ``tag``
1136 Run after a tag is created. The ID of the tagged changeset is in ``$HG_NODE``.
1136 Run after a tag is created. The ID of the tagged changeset is in ``$HG_NODE``.
1137 The name of the tag is in ``$HG_TAG``. The tag is local if ``$HG_LOCAL=1``, or in
1137 The name of the tag is in ``$HG_TAG``. The tag is local if ``$HG_LOCAL=1``, or in
1138 the repository if ``$HG_LOCAL=0``.
1138 the repository if ``$HG_LOCAL=0``.
1139
1139
1140 ``update``
1140 ``update``
1141 Run after updating the working directory. The changeset ID of first
1141 Run after updating the working directory. The changeset ID of first
1142 new parent is in ``$HG_PARENT1``. If updating to a merge, the ID of second new
1142 new parent is in ``$HG_PARENT1``. If updating to a merge, the ID of second new
1143 parent is in ``$HG_PARENT2``. If the update succeeded, ``$HG_ERROR=0``. If the
1143 parent is in ``$HG_PARENT2``. If the update succeeded, ``$HG_ERROR=0``. If the
1144 update failed (e.g. because conflicts were not resolved), ``$HG_ERROR=1``.
1144 update failed (e.g. because conflicts were not resolved), ``$HG_ERROR=1``.
1145
1145
1146 .. note::
1146 .. note::
1147
1147
1148 It is generally better to use standard hooks rather than the
1148 It is generally better to use standard hooks rather than the
1149 generic pre- and post- command hooks, as they are guaranteed to be
1149 generic pre- and post- command hooks, as they are guaranteed to be
1150 called in the appropriate contexts for influencing transactions.
1150 called in the appropriate contexts for influencing transactions.
1151 Also, hooks like "commit" will be called in all contexts that
1151 Also, hooks like "commit" will be called in all contexts that
1152 generate a commit (e.g. tag) and not just the commit command.
1152 generate a commit (e.g. tag) and not just the commit command.
1153
1153
1154 .. note::
1154 .. note::
1155
1155
1156 Environment variables with empty values may not be passed to
1156 Environment variables with empty values may not be passed to
1157 hooks on platforms such as Windows. As an example, ``$HG_PARENT2``
1157 hooks on platforms such as Windows. As an example, ``$HG_PARENT2``
1158 will have an empty value under Unix-like platforms for non-merge
1158 will have an empty value under Unix-like platforms for non-merge
1159 changesets, while it will not be available at all under Windows.
1159 changesets, while it will not be available at all under Windows.
1160
1160
1161 The syntax for Python hooks is as follows::
1161 The syntax for Python hooks is as follows::
1162
1162
1163 hookname = python:modulename.submodule.callable
1163 hookname = python:modulename.submodule.callable
1164 hookname = python:/path/to/python/module.py:callable
1164 hookname = python:/path/to/python/module.py:callable
1165
1165
1166 Python hooks are run within the Mercurial process. Each hook is
1166 Python hooks are run within the Mercurial process. Each hook is
1167 called with at least three keyword arguments: a ui object (keyword
1167 called with at least three keyword arguments: a ui object (keyword
1168 ``ui``), a repository object (keyword ``repo``), and a ``hooktype``
1168 ``ui``), a repository object (keyword ``repo``), and a ``hooktype``
1169 keyword that tells what kind of hook is used. Arguments listed as
1169 keyword that tells what kind of hook is used. Arguments listed as
1170 environment variables above are passed as keyword arguments, with no
1170 environment variables above are passed as keyword arguments, with no
1171 ``HG_`` prefix, and names in lower case.
1171 ``HG_`` prefix, and names in lower case.
1172
1172
1173 If a Python hook returns a "true" value or raises an exception, this
1173 If a Python hook returns a "true" value or raises an exception, this
1174 is treated as a failure.
1174 is treated as a failure.
1175
1175
1176
1176
1177 ``hostfingerprints``
1177 ``hostfingerprints``
1178 --------------------
1178 --------------------
1179
1179
1180 (Deprecated. Use ``[hostsecurity]``'s ``fingerprints`` options instead.)
1180 (Deprecated. Use ``[hostsecurity]``'s ``fingerprints`` options instead.)
1181
1181
1182 Fingerprints of the certificates of known HTTPS servers.
1182 Fingerprints of the certificates of known HTTPS servers.
1183
1183
1184 An HTTPS connection to a server with a fingerprint configured here will
1184 An HTTPS connection to a server with a fingerprint configured here will
1185 only succeed if the server's certificate matches the fingerprint.
1185 only succeed if the server's certificate matches the fingerprint.
1186 This is very similar to how ssh known hosts works.
1186 This is very similar to how ssh known hosts works.
1187
1187
1188 The fingerprint is the SHA-1 hash value of the DER encoded certificate.
1188 The fingerprint is the SHA-1 hash value of the DER encoded certificate.
1189 Multiple values can be specified (separated by spaces or commas). This can
1189 Multiple values can be specified (separated by spaces or commas). This can
1190 be used to define both old and new fingerprints while a host transitions
1190 be used to define both old and new fingerprints while a host transitions
1191 to a new certificate.
1191 to a new certificate.
1192
1192
1193 The CA chain and web.cacerts is not used for servers with a fingerprint.
1193 The CA chain and web.cacerts is not used for servers with a fingerprint.
1194
1194
1195 For example::
1195 For example::
1196
1196
1197 [hostfingerprints]
1197 [hostfingerprints]
1198 hg.intevation.de = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1198 hg.intevation.de = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1199 hg.intevation.org = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1199 hg.intevation.org = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1200
1200
1201 ``hostsecurity``
1201 ``hostsecurity``
1202 ----------------
1202 ----------------
1203
1203
1204 Used to specify global and per-host security settings for connecting to
1204 Used to specify global and per-host security settings for connecting to
1205 other machines.
1205 other machines.
1206
1206
1207 The following options control default behavior for all hosts.
1207 The following options control default behavior for all hosts.
1208
1208
1209 ``ciphers``
1209 ``ciphers``
1210 Defines the cryptographic ciphers to use for connections.
1210 Defines the cryptographic ciphers to use for connections.
1211
1211
1212 Value must be a valid OpenSSL Cipher List Format as documented at
1212 Value must be a valid OpenSSL Cipher List Format as documented at
1213 https://www.openssl.org/docs/manmaster/apps/ciphers.html#CIPHER-LIST-FORMAT.
1213 https://www.openssl.org/docs/manmaster/apps/ciphers.html#CIPHER-LIST-FORMAT.
1214
1214
1215 This setting is for advanced users only. Setting to incorrect values
1215 This setting is for advanced users only. Setting to incorrect values
1216 can significantly lower connection security or decrease performance.
1216 can significantly lower connection security or decrease performance.
1217 You have been warned.
1217 You have been warned.
1218
1218
1219 This option requires Python 2.7.
1219 This option requires Python 2.7.
1220
1220
1221 ``minimumprotocol``
1221 ``minimumprotocol``
1222 Defines the minimum channel encryption protocol to use.
1222 Defines the minimum channel encryption protocol to use.
1223
1223
1224 By default, the highest version of TLS supported by both client and server
1224 By default, the highest version of TLS supported by both client and server
1225 is used.
1225 is used.
1226
1226
1227 Allowed values are: ``tls1.0``, ``tls1.1``, ``tls1.2``.
1227 Allowed values are: ``tls1.0``, ``tls1.1``, ``tls1.2``.
1228
1228
1229 When running on an old Python version, only ``tls1.0`` is allowed since
1229 When running on an old Python version, only ``tls1.0`` is allowed since
1230 old versions of Python only support up to TLS 1.0.
1230 old versions of Python only support up to TLS 1.0.
1231
1231
1232 When running a Python that supports modern TLS versions, the default is
1232 When running a Python that supports modern TLS versions, the default is
1233 ``tls1.1``. ``tls1.0`` can still be used to allow TLS 1.0. However, this
1233 ``tls1.1``. ``tls1.0`` can still be used to allow TLS 1.0. However, this
1234 weakens security and should only be used as a feature of last resort if
1234 weakens security and should only be used as a feature of last resort if
1235 a server does not support TLS 1.1+.
1235 a server does not support TLS 1.1+.
1236
1236
1237 Options in the ``[hostsecurity]`` section can have the form
1237 Options in the ``[hostsecurity]`` section can have the form
1238 ``hostname``:``setting``. This allows multiple settings to be defined on a
1238 ``hostname``:``setting``. This allows multiple settings to be defined on a
1239 per-host basis.
1239 per-host basis.
1240
1240
1241 The following per-host settings can be defined.
1241 The following per-host settings can be defined.
1242
1242
1243 ``ciphers``
1243 ``ciphers``
1244 This behaves like ``ciphers`` as described above except it only applies
1244 This behaves like ``ciphers`` as described above except it only applies
1245 to the host on which it is defined.
1245 to the host on which it is defined.
1246
1246
1247 ``fingerprints``
1247 ``fingerprints``
1248 A list of hashes of the DER encoded peer/remote certificate. Values have
1248 A list of hashes of the DER encoded peer/remote certificate. Values have
1249 the form ``algorithm``:``fingerprint``. e.g.
1249 the form ``algorithm``:``fingerprint``. e.g.
1250 ``sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2``.
1250 ``sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2``.
1251 In addition, colons (``:``) can appear in the fingerprint part.
1251 In addition, colons (``:``) can appear in the fingerprint part.
1252
1252
1253 The following algorithms/prefixes are supported: ``sha1``, ``sha256``,
1253 The following algorithms/prefixes are supported: ``sha1``, ``sha256``,
1254 ``sha512``.
1254 ``sha512``.
1255
1255
1256 Use of ``sha256`` or ``sha512`` is preferred.
1256 Use of ``sha256`` or ``sha512`` is preferred.
1257
1257
1258 If a fingerprint is specified, the CA chain is not validated for this
1258 If a fingerprint is specified, the CA chain is not validated for this
1259 host and Mercurial will require the remote certificate to match one
1259 host and Mercurial will require the remote certificate to match one
1260 of the fingerprints specified. This means if the server updates its
1260 of the fingerprints specified. This means if the server updates its
1261 certificate, Mercurial will abort until a new fingerprint is defined.
1261 certificate, Mercurial will abort until a new fingerprint is defined.
1262 This can provide stronger security than traditional CA-based validation
1262 This can provide stronger security than traditional CA-based validation
1263 at the expense of convenience.
1263 at the expense of convenience.
1264
1264
1265 This option takes precedence over ``verifycertsfile``.
1265 This option takes precedence over ``verifycertsfile``.
1266
1266
1267 ``minimumprotocol``
1267 ``minimumprotocol``
1268 This behaves like ``minimumprotocol`` as described above except it
1268 This behaves like ``minimumprotocol`` as described above except it
1269 only applies to the host on which it is defined.
1269 only applies to the host on which it is defined.
1270
1270
1271 ``verifycertsfile``
1271 ``verifycertsfile``
1272 Path to a file containing a list of PEM encoded certificates used to
1272 Path to a file containing a list of PEM encoded certificates used to
1273 verify the server certificate. Environment variables and ``~user``
1273 verify the server certificate. Environment variables and ``~user``
1274 constructs are expanded in the filename.
1274 constructs are expanded in the filename.
1275
1275
1276 The server certificate or the certificate's certificate authority (CA)
1276 The server certificate or the certificate's certificate authority (CA)
1277 must match a certificate from this file or certificate verification
1277 must match a certificate from this file or certificate verification
1278 will fail and connections to the server will be refused.
1278 will fail and connections to the server will be refused.
1279
1279
1280 If defined, only certificates provided by this file will be used:
1280 If defined, only certificates provided by this file will be used:
1281 ``web.cacerts`` and any system/default certificates will not be
1281 ``web.cacerts`` and any system/default certificates will not be
1282 used.
1282 used.
1283
1283
1284 This option has no effect if the per-host ``fingerprints`` option
1284 This option has no effect if the per-host ``fingerprints`` option
1285 is set.
1285 is set.
1286
1286
1287 The format of the file is as follows::
1287 The format of the file is as follows::
1288
1288
1289 -----BEGIN CERTIFICATE-----
1289 -----BEGIN CERTIFICATE-----
1290 ... (certificate in base64 PEM encoding) ...
1290 ... (certificate in base64 PEM encoding) ...
1291 -----END CERTIFICATE-----
1291 -----END CERTIFICATE-----
1292 -----BEGIN CERTIFICATE-----
1292 -----BEGIN CERTIFICATE-----
1293 ... (certificate in base64 PEM encoding) ...
1293 ... (certificate in base64 PEM encoding) ...
1294 -----END CERTIFICATE-----
1294 -----END CERTIFICATE-----
1295
1295
1296 For example::
1296 For example::
1297
1297
1298 [hostsecurity]
1298 [hostsecurity]
1299 hg.example.com:fingerprints = sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2
1299 hg.example.com:fingerprints = sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2
1300 hg2.example.com:fingerprints = sha1:914f1aff87249c09b6859b88b1906d30756491ca, sha1:fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1300 hg2.example.com:fingerprints = sha1:914f1aff87249c09b6859b88b1906d30756491ca, sha1:fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1301 hg3.example.com:fingerprints = sha256:9a:b0:dc:e2:75:ad:8a:b7:84:58:e5:1f:07:32:f1:87:e6:bd:24:22:af:b7:ce:8e:9c:b4:10:cf:b9:f4:0e:d2
1301 hg3.example.com:fingerprints = sha256:9a:b0:dc:e2:75:ad:8a:b7:84:58:e5:1f:07:32:f1:87:e6:bd:24:22:af:b7:ce:8e:9c:b4:10:cf:b9:f4:0e:d2
1302 foo.example.com:verifycertsfile = /etc/ssl/trusted-ca-certs.pem
1302 foo.example.com:verifycertsfile = /etc/ssl/trusted-ca-certs.pem
1303
1303
1304 To change the default minimum protocol version to TLS 1.2 but to allow TLS 1.1
1304 To change the default minimum protocol version to TLS 1.2 but to allow TLS 1.1
1305 when connecting to ``hg.example.com``::
1305 when connecting to ``hg.example.com``::
1306
1306
1307 [hostsecurity]
1307 [hostsecurity]
1308 minimumprotocol = tls1.2
1308 minimumprotocol = tls1.2
1309 hg.example.com:minimumprotocol = tls1.1
1309 hg.example.com:minimumprotocol = tls1.1
1310
1310
1311 ``http_proxy``
1311 ``http_proxy``
1312 --------------
1312 --------------
1313
1313
1314 Used to access web-based Mercurial repositories through an HTTP
1314 Used to access web-based Mercurial repositories through an HTTP
1315 proxy.
1315 proxy.
1316
1316
1317 ``host``
1317 ``host``
1318 Host name and (optional) port of the proxy server, for example
1318 Host name and (optional) port of the proxy server, for example
1319 "myproxy:8000".
1319 "myproxy:8000".
1320
1320
1321 ``no``
1321 ``no``
1322 Optional. Comma-separated list of host names that should bypass
1322 Optional. Comma-separated list of host names that should bypass
1323 the proxy.
1323 the proxy.
1324
1324
1325 ``passwd``
1325 ``passwd``
1326 Optional. Password to authenticate with at the proxy server.
1326 Optional. Password to authenticate with at the proxy server.
1327
1327
1328 ``user``
1328 ``user``
1329 Optional. User name to authenticate with at the proxy server.
1329 Optional. User name to authenticate with at the proxy server.
1330
1330
1331 ``always``
1331 ``always``
1332 Optional. Always use the proxy, even for localhost and any entries
1332 Optional. Always use the proxy, even for localhost and any entries
1333 in ``http_proxy.no``. (default: False)
1333 in ``http_proxy.no``. (default: False)
1334
1334
1335 ``http``
1335 ``http``
1336 ----------
1336 ----------
1337
1337
1338 Used to configure access to Mercurial repositories via HTTP.
1338 Used to configure access to Mercurial repositories via HTTP.
1339
1339
1340 ``timeout``
1340 ``timeout``
1341 If set, blocking operations will timeout after that many seconds.
1341 If set, blocking operations will timeout after that many seconds.
1342 (default: None)
1342 (default: None)
1343
1343
1344 ``merge``
1344 ``merge``
1345 ---------
1345 ---------
1346
1346
1347 This section specifies behavior during merges and updates.
1347 This section specifies behavior during merges and updates.
1348
1348
1349 ``checkignored``
1349 ``checkignored``
1350 Controls behavior when an ignored file on disk has the same name as a tracked
1350 Controls behavior when an ignored file on disk has the same name as a tracked
1351 file in the changeset being merged or updated to, and has different
1351 file in the changeset being merged or updated to, and has different
1352 contents. Options are ``abort``, ``warn`` and ``ignore``. With ``abort``,
1352 contents. Options are ``abort``, ``warn`` and ``ignore``. With ``abort``,
1353 abort on such files. With ``warn``, warn on such files and back them up as
1353 abort on such files. With ``warn``, warn on such files and back them up as
1354 ``.orig``. With ``ignore``, don't print a warning and back them up as
1354 ``.orig``. With ``ignore``, don't print a warning and back them up as
1355 ``.orig``. (default: ``abort``)
1355 ``.orig``. (default: ``abort``)
1356
1356
1357 ``checkunknown``
1357 ``checkunknown``
1358 Controls behavior when an unknown file that isn't ignored has the same name
1358 Controls behavior when an unknown file that isn't ignored has the same name
1359 as a tracked file in the changeset being merged or updated to, and has
1359 as a tracked file in the changeset being merged or updated to, and has
1360 different contents. Similar to ``merge.checkignored``, except for files that
1360 different contents. Similar to ``merge.checkignored``, except for files that
1361 are not ignored. (default: ``abort``)
1361 are not ignored. (default: ``abort``)
1362
1362
1363 ``on-failure``
1363 ``on-failure``
1364 When set to ``continue`` (the default), the merge process attempts to
1364 When set to ``continue`` (the default), the merge process attempts to
1365 merge all unresolved files using the merge chosen tool, regardless of
1365 merge all unresolved files using the merge chosen tool, regardless of
1366 whether previous file merge attempts during the process succeeded or not.
1366 whether previous file merge attempts during the process succeeded or not.
1367 Setting this to ``prompt`` will prompt after any merge failure whether to continue
1367 Setting this to ``prompt`` will prompt after any merge failure whether to continue
1368 or halt the merge process. Setting this to ``halt`` will automatically
1368 or halt the merge process. Setting this to ``halt`` will automatically
1369 halt the merge process on any merge tool failure. The merge process
1369 halt the merge process on any merge tool failure. The merge process
1370 can be restarted by using the ``resolve`` command. When a merge is
1370 can be restarted by using the ``resolve`` command. When a merge is
1371 halted, the repository is left in a normal ``unresolved`` merge state.
1371 halted, the repository is left in a normal ``unresolved`` merge state.
1372 (default: ``continue``)
1372 (default: ``continue``)
1373
1373
1374 ``strict-capability-check``
1374 ``strict-capability-check``
1375 Whether capabilities of internal merge tools are checked strictly
1375 Whether capabilities of internal merge tools are checked strictly
1376 or not, while examining rules to decide merge tool to be used.
1376 or not, while examining rules to decide merge tool to be used.
1377 (default: False)
1377 (default: False)
1378
1378
1379 ``merge-patterns``
1379 ``merge-patterns``
1380 ------------------
1380 ------------------
1381
1381
1382 This section specifies merge tools to associate with particular file
1382 This section specifies merge tools to associate with particular file
1383 patterns. Tools matched here will take precedence over the default
1383 patterns. Tools matched here will take precedence over the default
1384 merge tool. Patterns are globs by default, rooted at the repository
1384 merge tool. Patterns are globs by default, rooted at the repository
1385 root.
1385 root.
1386
1386
1387 Example::
1387 Example::
1388
1388
1389 [merge-patterns]
1389 [merge-patterns]
1390 **.c = kdiff3
1390 **.c = kdiff3
1391 **.jpg = myimgmerge
1391 **.jpg = myimgmerge
1392
1392
1393 ``merge-tools``
1393 ``merge-tools``
1394 ---------------
1394 ---------------
1395
1395
1396 This section configures external merge tools to use for file-level
1396 This section configures external merge tools to use for file-level
1397 merges. This section has likely been preconfigured at install time.
1397 merges. This section has likely been preconfigured at install time.
1398 Use :hg:`config merge-tools` to check the existing configuration.
1398 Use :hg:`config merge-tools` to check the existing configuration.
1399 Also see :hg:`help merge-tools` for more details.
1399 Also see :hg:`help merge-tools` for more details.
1400
1400
1401 Example ``~/.hgrc``::
1401 Example ``~/.hgrc``::
1402
1402
1403 [merge-tools]
1403 [merge-tools]
1404 # Override stock tool location
1404 # Override stock tool location
1405 kdiff3.executable = ~/bin/kdiff3
1405 kdiff3.executable = ~/bin/kdiff3
1406 # Specify command line
1406 # Specify command line
1407 kdiff3.args = $base $local $other -o $output
1407 kdiff3.args = $base $local $other -o $output
1408 # Give higher priority
1408 # Give higher priority
1409 kdiff3.priority = 1
1409 kdiff3.priority = 1
1410
1410
1411 # Changing the priority of preconfigured tool
1411 # Changing the priority of preconfigured tool
1412 meld.priority = 0
1412 meld.priority = 0
1413
1413
1414 # Disable a preconfigured tool
1414 # Disable a preconfigured tool
1415 vimdiff.disabled = yes
1415 vimdiff.disabled = yes
1416
1416
1417 # Define new tool
1417 # Define new tool
1418 myHtmlTool.args = -m $local $other $base $output
1418 myHtmlTool.args = -m $local $other $base $output
1419 myHtmlTool.regkey = Software\FooSoftware\HtmlMerge
1419 myHtmlTool.regkey = Software\FooSoftware\HtmlMerge
1420 myHtmlTool.priority = 1
1420 myHtmlTool.priority = 1
1421
1421
1422 Supported arguments:
1422 Supported arguments:
1423
1423
1424 ``priority``
1424 ``priority``
1425 The priority in which to evaluate this tool.
1425 The priority in which to evaluate this tool.
1426 (default: 0)
1426 (default: 0)
1427
1427
1428 ``executable``
1428 ``executable``
1429 Either just the name of the executable or its pathname.
1429 Either just the name of the executable or its pathname.
1430
1430
1431 .. container:: windows
1431 .. container:: windows
1432
1432
1433 On Windows, the path can use environment variables with ${ProgramFiles}
1433 On Windows, the path can use environment variables with ${ProgramFiles}
1434 syntax.
1434 syntax.
1435
1435
1436 (default: the tool name)
1436 (default: the tool name)
1437
1437
1438 ``args``
1438 ``args``
1439 The arguments to pass to the tool executable. You can refer to the
1439 The arguments to pass to the tool executable. You can refer to the
1440 files being merged as well as the output file through these
1440 files being merged as well as the output file through these
1441 variables: ``$base``, ``$local``, ``$other``, ``$output``.
1441 variables: ``$base``, ``$local``, ``$other``, ``$output``.
1442
1442
1443 The meaning of ``$local`` and ``$other`` can vary depending on which action is
1443 The meaning of ``$local`` and ``$other`` can vary depending on which action is
1444 being performed. During an update or merge, ``$local`` represents the original
1444 being performed. During an update or merge, ``$local`` represents the original
1445 state of the file, while ``$other`` represents the commit you are updating to or
1445 state of the file, while ``$other`` represents the commit you are updating to or
1446 the commit you are merging with. During a rebase, ``$local`` represents the
1446 the commit you are merging with. During a rebase, ``$local`` represents the
1447 destination of the rebase, and ``$other`` represents the commit being rebased.
1447 destination of the rebase, and ``$other`` represents the commit being rebased.
1448
1448
1449 Some operations define custom labels to assist with identifying the revisions,
1449 Some operations define custom labels to assist with identifying the revisions,
1450 accessible via ``$labellocal``, ``$labelother``, and ``$labelbase``. If custom
1450 accessible via ``$labellocal``, ``$labelother``, and ``$labelbase``. If custom
1451 labels are not available, these will be ``local``, ``other``, and ``base``,
1451 labels are not available, these will be ``local``, ``other``, and ``base``,
1452 respectively.
1452 respectively.
1453 (default: ``$local $base $other``)
1453 (default: ``$local $base $other``)
1454
1454
1455 ``premerge``
1455 ``premerge``
1456 Attempt to run internal non-interactive 3-way merge tool before
1456 Attempt to run internal non-interactive 3-way merge tool before
1457 launching external tool. Options are ``true``, ``false``, ``keep`` or
1457 launching external tool. Options are ``true``, ``false``, ``keep`` or
1458 ``keep-merge3``. The ``keep`` option will leave markers in the file if the
1458 ``keep-merge3``. The ``keep`` option will leave markers in the file if the
1459 premerge fails. The ``keep-merge3`` will do the same but include information
1459 premerge fails. The ``keep-merge3`` will do the same but include information
1460 about the base of the merge in the marker (see internal :merge3 in
1460 about the base of the merge in the marker (see internal :merge3 in
1461 :hg:`help merge-tools`).
1461 :hg:`help merge-tools`).
1462 (default: True)
1462 (default: True)
1463
1463
1464 ``binary``
1464 ``binary``
1465 This tool can merge binary files. (default: False, unless tool
1465 This tool can merge binary files. (default: False, unless tool
1466 was selected by file pattern match)
1466 was selected by file pattern match)
1467
1467
1468 ``symlink``
1468 ``symlink``
1469 This tool can merge symlinks. (default: False)
1469 This tool can merge symlinks. (default: False)
1470
1470
1471 ``check``
1471 ``check``
1472 A list of merge success-checking options:
1472 A list of merge success-checking options:
1473
1473
1474 ``changed``
1474 ``changed``
1475 Ask whether merge was successful when the merged file shows no changes.
1475 Ask whether merge was successful when the merged file shows no changes.
1476 ``conflicts``
1476 ``conflicts``
1477 Check whether there are conflicts even though the tool reported success.
1477 Check whether there are conflicts even though the tool reported success.
1478 ``prompt``
1478 ``prompt``
1479 Always prompt for merge success, regardless of success reported by tool.
1479 Always prompt for merge success, regardless of success reported by tool.
1480
1480
1481 ``fixeol``
1481 ``fixeol``
1482 Attempt to fix up EOL changes caused by the merge tool.
1482 Attempt to fix up EOL changes caused by the merge tool.
1483 (default: False)
1483 (default: False)
1484
1484
1485 ``gui``
1485 ``gui``
1486 This tool requires a graphical interface to run. (default: False)
1486 This tool requires a graphical interface to run. (default: False)
1487
1487
1488 ``mergemarkers``
1488 ``mergemarkers``
1489 Controls whether the labels passed via ``$labellocal``, ``$labelother``, and
1489 Controls whether the labels passed via ``$labellocal``, ``$labelother``, and
1490 ``$labelbase`` are ``detailed`` (respecting ``mergemarkertemplate``) or
1490 ``$labelbase`` are ``detailed`` (respecting ``mergemarkertemplate``) or
1491 ``basic``. If ``premerge`` is ``keep`` or ``keep-merge3``, the conflict
1491 ``basic``. If ``premerge`` is ``keep`` or ``keep-merge3``, the conflict
1492 markers generated during premerge will be ``detailed`` if either this option or
1492 markers generated during premerge will be ``detailed`` if either this option or
1493 the corresponding option in the ``[ui]`` section is ``detailed``.
1493 the corresponding option in the ``[ui]`` section is ``detailed``.
1494 (default: ``basic``)
1494 (default: ``basic``)
1495
1495
1496 ``mergemarkertemplate``
1496 ``mergemarkertemplate``
1497 This setting can be used to override ``mergemarkertemplate`` from the ``[ui]``
1497 This setting can be used to override ``mergemarkertemplate`` from the ``[ui]``
1498 section on a per-tool basis; this applies to the ``$label``-prefixed variables
1498 section on a per-tool basis; this applies to the ``$label``-prefixed variables
1499 and to the conflict markers that are generated if ``premerge`` is ``keep`` or
1499 and to the conflict markers that are generated if ``premerge`` is ``keep`` or
1500 ``keep-merge3``. See the corresponding variable in ``[ui]`` for more
1500 ``keep-merge3``. See the corresponding variable in ``[ui]`` for more
1501 information.
1501 information.
1502
1502
1503 .. container:: windows
1503 .. container:: windows
1504
1504
1505 ``regkey``
1505 ``regkey``
1506 Windows registry key which describes install location of this
1506 Windows registry key which describes install location of this
1507 tool. Mercurial will search for this key first under
1507 tool. Mercurial will search for this key first under
1508 ``HKEY_CURRENT_USER`` and then under ``HKEY_LOCAL_MACHINE``.
1508 ``HKEY_CURRENT_USER`` and then under ``HKEY_LOCAL_MACHINE``.
1509 (default: None)
1509 (default: None)
1510
1510
1511 ``regkeyalt``
1511 ``regkeyalt``
1512 An alternate Windows registry key to try if the first key is not
1512 An alternate Windows registry key to try if the first key is not
1513 found. The alternate key uses the same ``regname`` and ``regappend``
1513 found. The alternate key uses the same ``regname`` and ``regappend``
1514 semantics of the primary key. The most common use for this key
1514 semantics of the primary key. The most common use for this key
1515 is to search for 32bit applications on 64bit operating systems.
1515 is to search for 32bit applications on 64bit operating systems.
1516 (default: None)
1516 (default: None)
1517
1517
1518 ``regname``
1518 ``regname``
1519 Name of value to read from specified registry key.
1519 Name of value to read from specified registry key.
1520 (default: the unnamed (default) value)
1520 (default: the unnamed (default) value)
1521
1521
1522 ``regappend``
1522 ``regappend``
1523 String to append to the value read from the registry, typically
1523 String to append to the value read from the registry, typically
1524 the executable name of the tool.
1524 the executable name of the tool.
1525 (default: None)
1525 (default: None)
1526
1526
1527 ``pager``
1527 ``pager``
1528 ---------
1528 ---------
1529
1529
1530 Setting used to control when to paginate and with what external tool. See
1530 Setting used to control when to paginate and with what external tool. See
1531 :hg:`help pager` for details.
1531 :hg:`help pager` for details.
1532
1532
1533 ``pager``
1533 ``pager``
1534 Define the external tool used as pager.
1534 Define the external tool used as pager.
1535
1535
1536 If no pager is set, Mercurial uses the environment variable $PAGER.
1536 If no pager is set, Mercurial uses the environment variable $PAGER.
1537 If neither pager.pager, nor $PAGER is set, a default pager will be
1537 If neither pager.pager, nor $PAGER is set, a default pager will be
1538 used, typically `less` on Unix and `more` on Windows. Example::
1538 used, typically `less` on Unix and `more` on Windows. Example::
1539
1539
1540 [pager]
1540 [pager]
1541 pager = less -FRX
1541 pager = less -FRX
1542
1542
1543 ``ignore``
1543 ``ignore``
1544 List of commands to disable the pager for. Example::
1544 List of commands to disable the pager for. Example::
1545
1545
1546 [pager]
1546 [pager]
1547 ignore = version, help, update
1547 ignore = version, help, update
1548
1548
1549 ``patch``
1549 ``patch``
1550 ---------
1550 ---------
1551
1551
1552 Settings used when applying patches, for instance through the 'import'
1552 Settings used when applying patches, for instance through the 'import'
1553 command or with Mercurial Queues extension.
1553 command or with Mercurial Queues extension.
1554
1554
1555 ``eol``
1555 ``eol``
1556 When set to 'strict' patch content and patched files end of lines
1556 When set to 'strict' patch content and patched files end of lines
1557 are preserved. When set to ``lf`` or ``crlf``, both files end of
1557 are preserved. When set to ``lf`` or ``crlf``, both files end of
1558 lines are ignored when patching and the result line endings are
1558 lines are ignored when patching and the result line endings are
1559 normalized to either LF (Unix) or CRLF (Windows). When set to
1559 normalized to either LF (Unix) or CRLF (Windows). When set to
1560 ``auto``, end of lines are again ignored while patching but line
1560 ``auto``, end of lines are again ignored while patching but line
1561 endings in patched files are normalized to their original setting
1561 endings in patched files are normalized to their original setting
1562 on a per-file basis. If target file does not exist or has no end
1562 on a per-file basis. If target file does not exist or has no end
1563 of line, patch line endings are preserved.
1563 of line, patch line endings are preserved.
1564 (default: strict)
1564 (default: strict)
1565
1565
1566 ``fuzz``
1566 ``fuzz``
1567 The number of lines of 'fuzz' to allow when applying patches. This
1567 The number of lines of 'fuzz' to allow when applying patches. This
1568 controls how much context the patcher is allowed to ignore when
1568 controls how much context the patcher is allowed to ignore when
1569 trying to apply a patch.
1569 trying to apply a patch.
1570 (default: 2)
1570 (default: 2)
1571
1571
1572 ``paths``
1572 ``paths``
1573 ---------
1573 ---------
1574
1574
1575 Assigns symbolic names and behavior to repositories.
1575 Assigns symbolic names and behavior to repositories.
1576
1576
1577 Options are symbolic names defining the URL or directory that is the
1577 Options are symbolic names defining the URL or directory that is the
1578 location of the repository. Example::
1578 location of the repository. Example::
1579
1579
1580 [paths]
1580 [paths]
1581 my_server = https://example.com/my_repo
1581 my_server = https://example.com/my_repo
1582 local_path = /home/me/repo
1582 local_path = /home/me/repo
1583
1583
1584 These symbolic names can be used from the command line. To pull
1584 These symbolic names can be used from the command line. To pull
1585 from ``my_server``: :hg:`pull my_server`. To push to ``local_path``:
1585 from ``my_server``: :hg:`pull my_server`. To push to ``local_path``:
1586 :hg:`push local_path`.
1586 :hg:`push local_path`.
1587
1587
1588 Options containing colons (``:``) denote sub-options that can influence
1588 Options containing colons (``:``) denote sub-options that can influence
1589 behavior for that specific path. Example::
1589 behavior for that specific path. Example::
1590
1590
1591 [paths]
1591 [paths]
1592 my_server = https://example.com/my_path
1592 my_server = https://example.com/my_path
1593 my_server:pushurl = ssh://example.com/my_path
1593 my_server:pushurl = ssh://example.com/my_path
1594
1594
1595 The following sub-options can be defined:
1595 The following sub-options can be defined:
1596
1596
1597 ``pushurl``
1597 ``pushurl``
1598 The URL to use for push operations. If not defined, the location
1598 The URL to use for push operations. If not defined, the location
1599 defined by the path's main entry is used.
1599 defined by the path's main entry is used.
1600
1600
1601 ``pushrev``
1601 ``pushrev``
1602 A revset defining which revisions to push by default.
1602 A revset defining which revisions to push by default.
1603
1603
1604 When :hg:`push` is executed without a ``-r`` argument, the revset
1604 When :hg:`push` is executed without a ``-r`` argument, the revset
1605 defined by this sub-option is evaluated to determine what to push.
1605 defined by this sub-option is evaluated to determine what to push.
1606
1606
1607 For example, a value of ``.`` will push the working directory's
1607 For example, a value of ``.`` will push the working directory's
1608 revision by default.
1608 revision by default.
1609
1609
1610 Revsets specifying bookmarks will not result in the bookmark being
1610 Revsets specifying bookmarks will not result in the bookmark being
1611 pushed.
1611 pushed.
1612
1612
1613 The following special named paths exist:
1613 The following special named paths exist:
1614
1614
1615 ``default``
1615 ``default``
1616 The URL or directory to use when no source or remote is specified.
1616 The URL or directory to use when no source or remote is specified.
1617
1617
1618 :hg:`clone` will automatically define this path to the location the
1618 :hg:`clone` will automatically define this path to the location the
1619 repository was cloned from.
1619 repository was cloned from.
1620
1620
1621 ``default-push``
1621 ``default-push``
1622 (deprecated) The URL or directory for the default :hg:`push` location.
1622 (deprecated) The URL or directory for the default :hg:`push` location.
1623 ``default:pushurl`` should be used instead.
1623 ``default:pushurl`` should be used instead.
1624
1624
1625 ``phases``
1625 ``phases``
1626 ----------
1626 ----------
1627
1627
1628 Specifies default handling of phases. See :hg:`help phases` for more
1628 Specifies default handling of phases. See :hg:`help phases` for more
1629 information about working with phases.
1629 information about working with phases.
1630
1630
1631 ``publish``
1631 ``publish``
1632 Controls draft phase behavior when working as a server. When true,
1632 Controls draft phase behavior when working as a server. When true,
1633 pushed changesets are set to public in both client and server and
1633 pushed changesets are set to public in both client and server and
1634 pulled or cloned changesets are set to public in the client.
1634 pulled or cloned changesets are set to public in the client.
1635 (default: True)
1635 (default: True)
1636
1636
1637 ``new-commit``
1637 ``new-commit``
1638 Phase of newly-created commits.
1638 Phase of newly-created commits.
1639 (default: draft)
1639 (default: draft)
1640
1640
1641 ``checksubrepos``
1641 ``checksubrepos``
1642 Check the phase of the current revision of each subrepository. Allowed
1642 Check the phase of the current revision of each subrepository. Allowed
1643 values are "ignore", "follow" and "abort". For settings other than
1643 values are "ignore", "follow" and "abort". For settings other than
1644 "ignore", the phase of the current revision of each subrepository is
1644 "ignore", the phase of the current revision of each subrepository is
1645 checked before committing the parent repository. If any of those phases is
1645 checked before committing the parent repository. If any of those phases is
1646 greater than the phase of the parent repository (e.g. if a subrepo is in a
1646 greater than the phase of the parent repository (e.g. if a subrepo is in a
1647 "secret" phase while the parent repo is in "draft" phase), the commit is
1647 "secret" phase while the parent repo is in "draft" phase), the commit is
1648 either aborted (if checksubrepos is set to "abort") or the higher phase is
1648 either aborted (if checksubrepos is set to "abort") or the higher phase is
1649 used for the parent repository commit (if set to "follow").
1649 used for the parent repository commit (if set to "follow").
1650 (default: follow)
1650 (default: follow)
1651
1651
1652
1652
1653 ``profiling``
1653 ``profiling``
1654 -------------
1654 -------------
1655
1655
1656 Specifies profiling type, format, and file output. Two profilers are
1656 Specifies profiling type, format, and file output. Two profilers are
1657 supported: an instrumenting profiler (named ``ls``), and a sampling
1657 supported: an instrumenting profiler (named ``ls``), and a sampling
1658 profiler (named ``stat``).
1658 profiler (named ``stat``).
1659
1659
1660 In this section description, 'profiling data' stands for the raw data
1660 In this section description, 'profiling data' stands for the raw data
1661 collected during profiling, while 'profiling report' stands for a
1661 collected during profiling, while 'profiling report' stands for a
1662 statistical text report generated from the profiling data.
1662 statistical text report generated from the profiling data.
1663
1663
1664 ``enabled``
1664 ``enabled``
1665 Enable the profiler.
1665 Enable the profiler.
1666 (default: false)
1666 (default: false)
1667
1667
1668 This is equivalent to passing ``--profile`` on the command line.
1668 This is equivalent to passing ``--profile`` on the command line.
1669
1669
1670 ``type``
1670 ``type``
1671 The type of profiler to use.
1671 The type of profiler to use.
1672 (default: stat)
1672 (default: stat)
1673
1673
1674 ``ls``
1674 ``ls``
1675 Use Python's built-in instrumenting profiler. This profiler
1675 Use Python's built-in instrumenting profiler. This profiler
1676 works on all platforms, but each line number it reports is the
1676 works on all platforms, but each line number it reports is the
1677 first line of a function. This restriction makes it difficult to
1677 first line of a function. This restriction makes it difficult to
1678 identify the expensive parts of a non-trivial function.
1678 identify the expensive parts of a non-trivial function.
1679 ``stat``
1679 ``stat``
1680 Use a statistical profiler, statprof. This profiler is most
1680 Use a statistical profiler, statprof. This profiler is most
1681 useful for profiling commands that run for longer than about 0.1
1681 useful for profiling commands that run for longer than about 0.1
1682 seconds.
1682 seconds.
1683
1683
1684 ``format``
1684 ``format``
1685 Profiling format. Specific to the ``ls`` instrumenting profiler.
1685 Profiling format. Specific to the ``ls`` instrumenting profiler.
1686 (default: text)
1686 (default: text)
1687
1687
1688 ``text``
1688 ``text``
1689 Generate a profiling report. When saving to a file, it should be
1689 Generate a profiling report. When saving to a file, it should be
1690 noted that only the report is saved, and the profiling data is
1690 noted that only the report is saved, and the profiling data is
1691 not kept.
1691 not kept.
1692 ``kcachegrind``
1692 ``kcachegrind``
1693 Format profiling data for kcachegrind use: when saving to a
1693 Format profiling data for kcachegrind use: when saving to a
1694 file, the generated file can directly be loaded into
1694 file, the generated file can directly be loaded into
1695 kcachegrind.
1695 kcachegrind.
1696
1696
1697 ``statformat``
1697 ``statformat``
1698 Profiling format for the ``stat`` profiler.
1698 Profiling format for the ``stat`` profiler.
1699 (default: hotpath)
1699 (default: hotpath)
1700
1700
1701 ``hotpath``
1701 ``hotpath``
1702 Show a tree-based display containing the hot path of execution (where
1702 Show a tree-based display containing the hot path of execution (where
1703 most time was spent).
1703 most time was spent).
1704 ``bymethod``
1704 ``bymethod``
1705 Show a table of methods ordered by how frequently they are active.
1705 Show a table of methods ordered by how frequently they are active.
1706 ``byline``
1706 ``byline``
1707 Show a table of lines in files ordered by how frequently they are active.
1707 Show a table of lines in files ordered by how frequently they are active.
1708 ``json``
1708 ``json``
1709 Render profiling data as JSON.
1709 Render profiling data as JSON.
1710
1710
1711 ``frequency``
1711 ``frequency``
1712 Sampling frequency. Specific to the ``stat`` sampling profiler.
1712 Sampling frequency. Specific to the ``stat`` sampling profiler.
1713 (default: 1000)
1713 (default: 1000)
1714
1714
1715 ``output``
1715 ``output``
1716 File path where profiling data or report should be saved. If the
1716 File path where profiling data or report should be saved. If the
1717 file exists, it is replaced. (default: None, data is printed on
1717 file exists, it is replaced. (default: None, data is printed on
1718 stderr)
1718 stderr)
1719
1719
1720 ``sort``
1720 ``sort``
1721 Sort field. Specific to the ``ls`` instrumenting profiler.
1721 Sort field. Specific to the ``ls`` instrumenting profiler.
1722 One of ``callcount``, ``reccallcount``, ``totaltime`` and
1722 One of ``callcount``, ``reccallcount``, ``totaltime`` and
1723 ``inlinetime``.
1723 ``inlinetime``.
1724 (default: inlinetime)
1724 (default: inlinetime)
1725
1725
1726 ``time-track``
1726 ``time-track``
1727 Control if the stat profiler track ``cpu`` or ``real`` time.
1727 Control if the stat profiler track ``cpu`` or ``real`` time.
1728 (default: ``cpu`` on Windows, otherwise ``real``)
1728 (default: ``cpu`` on Windows, otherwise ``real``)
1729
1729
1730 ``limit``
1730 ``limit``
1731 Number of lines to show. Specific to the ``ls`` instrumenting profiler.
1731 Number of lines to show. Specific to the ``ls`` instrumenting profiler.
1732 (default: 30)
1732 (default: 30)
1733
1733
1734 ``nested``
1734 ``nested``
1735 Show at most this number of lines of drill-down info after each main entry.
1735 Show at most this number of lines of drill-down info after each main entry.
1736 This can help explain the difference between Total and Inline.
1736 This can help explain the difference between Total and Inline.
1737 Specific to the ``ls`` instrumenting profiler.
1737 Specific to the ``ls`` instrumenting profiler.
1738 (default: 0)
1738 (default: 0)
1739
1739
1740 ``showmin``
1740 ``showmin``
1741 Minimum fraction of samples an entry must have for it to be displayed.
1741 Minimum fraction of samples an entry must have for it to be displayed.
1742 Can be specified as a float between ``0.0`` and ``1.0`` or can have a
1742 Can be specified as a float between ``0.0`` and ``1.0`` or can have a
1743 ``%`` afterwards to allow values up to ``100``. e.g. ``5%``.
1743 ``%`` afterwards to allow values up to ``100``. e.g. ``5%``.
1744
1744
1745 Only used by the ``stat`` profiler.
1745 Only used by the ``stat`` profiler.
1746
1746
1747 For the ``hotpath`` format, default is ``0.05``.
1747 For the ``hotpath`` format, default is ``0.05``.
1748 For the ``chrome`` format, default is ``0.005``.
1748 For the ``chrome`` format, default is ``0.005``.
1749
1749
1750 The option is unused on other formats.
1750 The option is unused on other formats.
1751
1751
1752 ``showmax``
1752 ``showmax``
1753 Maximum fraction of samples an entry can have before it is ignored in
1753 Maximum fraction of samples an entry can have before it is ignored in
1754 display. Values format is the same as ``showmin``.
1754 display. Values format is the same as ``showmin``.
1755
1755
1756 Only used by the ``stat`` profiler.
1756 Only used by the ``stat`` profiler.
1757
1757
1758 For the ``chrome`` format, default is ``0.999``.
1758 For the ``chrome`` format, default is ``0.999``.
1759
1759
1760 The option is unused on other formats.
1760 The option is unused on other formats.
1761
1761
1762 ``progress``
1762 ``progress``
1763 ------------
1763 ------------
1764
1764
1765 Mercurial commands can draw progress bars that are as informative as
1765 Mercurial commands can draw progress bars that are as informative as
1766 possible. Some progress bars only offer indeterminate information, while others
1766 possible. Some progress bars only offer indeterminate information, while others
1767 have a definite end point.
1767 have a definite end point.
1768
1768
1769 ``debug``
1769 ``debug``
1770 Whether to print debug info when updating the progress bar. (default: False)
1770 Whether to print debug info when updating the progress bar. (default: False)
1771
1771
1772 ``delay``
1772 ``delay``
1773 Number of seconds (float) before showing the progress bar. (default: 3)
1773 Number of seconds (float) before showing the progress bar. (default: 3)
1774
1774
1775 ``changedelay``
1775 ``changedelay``
1776 Minimum delay before showing a new topic. When set to less than 3 * refresh,
1776 Minimum delay before showing a new topic. When set to less than 3 * refresh,
1777 that value will be used instead. (default: 1)
1777 that value will be used instead. (default: 1)
1778
1778
1779 ``estimateinterval``
1779 ``estimateinterval``
1780 Maximum sampling interval in seconds for speed and estimated time
1780 Maximum sampling interval in seconds for speed and estimated time
1781 calculation. (default: 60)
1781 calculation. (default: 60)
1782
1782
1783 ``refresh``
1783 ``refresh``
1784 Time in seconds between refreshes of the progress bar. (default: 0.1)
1784 Time in seconds between refreshes of the progress bar. (default: 0.1)
1785
1785
1786 ``format``
1786 ``format``
1787 Format of the progress bar.
1787 Format of the progress bar.
1788
1788
1789 Valid entries for the format field are ``topic``, ``bar``, ``number``,
1789 Valid entries for the format field are ``topic``, ``bar``, ``number``,
1790 ``unit``, ``estimate``, ``speed``, and ``item``. ``item`` defaults to the
1790 ``unit``, ``estimate``, ``speed``, and ``item``. ``item`` defaults to the
1791 last 20 characters of the item, but this can be changed by adding either
1791 last 20 characters of the item, but this can be changed by adding either
1792 ``-<num>`` which would take the last num characters, or ``+<num>`` for the
1792 ``-<num>`` which would take the last num characters, or ``+<num>`` for the
1793 first num characters.
1793 first num characters.
1794
1794
1795 (default: topic bar number estimate)
1795 (default: topic bar number estimate)
1796
1796
1797 ``width``
1797 ``width``
1798 If set, the maximum width of the progress information (that is, min(width,
1798 If set, the maximum width of the progress information (that is, min(width,
1799 term width) will be used).
1799 term width) will be used).
1800
1800
1801 ``clear-complete``
1801 ``clear-complete``
1802 Clear the progress bar after it's done. (default: True)
1802 Clear the progress bar after it's done. (default: True)
1803
1803
1804 ``disable``
1804 ``disable``
1805 If true, don't show a progress bar.
1805 If true, don't show a progress bar.
1806
1806
1807 ``assume-tty``
1807 ``assume-tty``
1808 If true, ALWAYS show a progress bar, unless disable is given.
1808 If true, ALWAYS show a progress bar, unless disable is given.
1809
1809
1810 ``rebase``
1810 ``rebase``
1811 ----------
1811 ----------
1812
1812
1813 ``evolution.allowdivergence``
1813 ``evolution.allowdivergence``
1814 Default to False, when True allow creating divergence when performing
1814 Default to False, when True allow creating divergence when performing
1815 rebase of obsolete changesets.
1815 rebase of obsolete changesets.
1816
1816
1817 ``revsetalias``
1817 ``revsetalias``
1818 ---------------
1818 ---------------
1819
1819
1820 Alias definitions for revsets. See :hg:`help revsets` for details.
1820 Alias definitions for revsets. See :hg:`help revsets` for details.
1821
1821
1822 ``rewrite``
1822 ``rewrite``
1823 -----------
1823 -----------
1824
1824
1825 ``backup-bundle``
1825 ``backup-bundle``
1826 Whether to save stripped changesets to a bundle file. (default: True)
1826 Whether to save stripped changesets to a bundle file. (default: True)
1827
1827
1828 ``update-timestamp``
1828 ``update-timestamp``
1829 If true, updates the date and time of the changeset to current. It is only
1829 If true, updates the date and time of the changeset to current. It is only
1830 applicable for hg amend in the current version.
1830 applicable for hg amend in the current version.
1831
1831
1832 ``storage``
1832 ``storage``
1833 -----------
1833 -----------
1834
1834
1835 Control the strategy Mercurial uses internally to store history. Options in this
1835 Control the strategy Mercurial uses internally to store history. Options in this
1836 category impact performance and repository size.
1836 category impact performance and repository size.
1837
1837
1838 ``revlog.optimize-delta-parent-choice``
1838 ``revlog.optimize-delta-parent-choice``
1839 When storing a merge revision, both parents will be equally considered as
1839 When storing a merge revision, both parents will be equally considered as
1840 a possible delta base. This results in better delta selection and improved
1840 a possible delta base. This results in better delta selection and improved
1841 revlog compression. This option is enabled by default.
1841 revlog compression. This option is enabled by default.
1842
1842
1843 Turning this option off can result in large increase of repository size for
1843 Turning this option off can result in large increase of repository size for
1844 repository with many merges.
1844 repository with many merges.
1845
1845
1846 ``revlog.reuse-external-delta-parent``
1846 ``revlog.reuse-external-delta-parent``
1847 Control the order in which delta parents are considered when adding new
1847 Control the order in which delta parents are considered when adding new
1848 revisions from an external source.
1848 revisions from an external source.
1849 (typically: apply bundle from `hg pull` or `hg push`).
1849 (typically: apply bundle from `hg pull` or `hg push`).
1850
1850
1851 New revisions are usually provided as a delta against other revisions. By
1851 New revisions are usually provided as a delta against other revisions. By
1852 default, Mercurial will try to reuse this delta first, therefore using the
1852 default, Mercurial will try to reuse this delta first, therefore using the
1853 same "delta parent" as the source. Directly using deltas from the source
1853 same "delta parent" as the source. Directly using deltas from the source
1854 reduces CPU usage and usually speeds up operation. However, in some case,
1854 reduces CPU usage and usually speeds up operation. However, in some case,
1855 the source might have sub-optimal delta bases and forcing their reevaluation
1855 the source might have sub-optimal delta bases and forcing their reevaluation
1856 is useful. For example, pushes from an old client could have sub-optimal
1856 is useful. For example, pushes from an old client could have sub-optimal
1857 delta parents that the server wants to optimize (lack of general delta, bad
1857 delta parents that the server wants to optimize (lack of general delta, bad
1858 parent choices, lack of sparse-revlog, etc.).
1858 parent choices, lack of sparse-revlog, etc.).
1859
1859
1860 This option is enabled by default. Turning it off will ensure bad delta
1860 This option is enabled by default. Turning it off will ensure bad delta
1861 parent choices from older client do not propagate to this repository, at
1861 parent choices from older client do not propagate to this repository, at
1862 the cost of a small increase in CPU consumption.
1862 the cost of a small increase in CPU consumption.
1863
1863
1864 Note: this option only controls the order in which delta parents are
1864 Note: this option only controls the order in which delta parents are
1865 considered. Even when disabled, the existing delta from the source will be
1865 considered. Even when disabled, the existing delta from the source will be
1866 reused if the same delta parent is selected.
1866 reused if the same delta parent is selected.
1867
1867
1868 ``revlog.reuse-external-delta``
1868 ``revlog.reuse-external-delta``
1869 Control the reuse of delta from external source.
1869 Control the reuse of delta from external source.
1870 (typically: apply bundle from `hg pull` or `hg push`).
1870 (typically: apply bundle from `hg pull` or `hg push`).
1871
1871
1872 New revisions are usually provided as a delta against another revision. By
1872 New revisions are usually provided as a delta against another revision. By
1873 default, Mercurial will not recompute the same delta again, trusting
1873 default, Mercurial will not recompute the same delta again, trusting
1874 externally provided deltas. There have been rare cases of small adjustment
1874 externally provided deltas. There have been rare cases of small adjustment
1875 to the diffing algorithm in the past. So in some rare cases, recomputing
1875 to the diffing algorithm in the past. So in some rare cases, recomputing
1876 deltas provided by ancient clients can provide better results. Disabling
1876 deltas provided by ancient clients can provide better results. Disabling
1877 this option means going through a full delta recomputation for all incoming
1877 this option means going through a full delta recomputation for all incoming
1878 revisions. It means a large increase in CPU usage and will slow operations
1878 revisions. It means a large increase in CPU usage and will slow operations
1879 down.
1879 down.
1880
1880
1881 This option is enabled by default. When disabled, it also disables the
1881 This option is enabled by default. When disabled, it also disables the
1882 related ``storage.revlog.reuse-external-delta-parent`` option.
1882 related ``storage.revlog.reuse-external-delta-parent`` option.
1883
1883
1884 ``revlog.zlib.level``
1885 Zlib compression level used when storing data into the repository. Accepted
1886 values range from 1 (lowest compression) to 9 (highest compression). Zlib's
1887 default value is 6.
1888
1884 ``server``
1889 ``server``
1885 ----------
1890 ----------
1886
1891
1887 Controls generic server settings.
1892 Controls generic server settings.
1888
1893
1889 ``bookmarks-pushkey-compat``
1894 ``bookmarks-pushkey-compat``
1890 Trigger the pushkey hook when bookmark updates are pushed. This config exists
1895 Trigger the pushkey hook when bookmark updates are pushed. This config exists
1891 for compatibility purposes (defaults to True)
1896 for compatibility purposes (defaults to True)
1892
1897
1893 If you use ``pushkey`` and ``pre-pushkey`` hooks to control bookmark
1898 If you use ``pushkey`` and ``pre-pushkey`` hooks to control bookmark
1894 movement we recommend you migrate them to ``txnclose-bookmark`` and
1899 movement we recommend you migrate them to ``txnclose-bookmark`` and
1895 ``pretxnclose-bookmark``.
1900 ``pretxnclose-bookmark``.
1896
1901
1897 ``compressionengines``
1902 ``compressionengines``
1898 List of compression engines and their relative priority to advertise
1903 List of compression engines and their relative priority to advertise
1899 to clients.
1904 to clients.
1900
1905
1901 The order of compression engines determines their priority, the first
1906 The order of compression engines determines their priority, the first
1902 having the highest priority. If a compression engine is not listed
1907 having the highest priority. If a compression engine is not listed
1903 here, it won't be advertised to clients.
1908 here, it won't be advertised to clients.
1904
1909
1905 If not set (the default), built-in defaults are used. Run
1910 If not set (the default), built-in defaults are used. Run
1906 :hg:`debuginstall` to list available compression engines and their
1911 :hg:`debuginstall` to list available compression engines and their
1907 default wire protocol priority.
1912 default wire protocol priority.
1908
1913
1909 Older Mercurial clients only support zlib compression and this setting
1914 Older Mercurial clients only support zlib compression and this setting
1910 has no effect for legacy clients.
1915 has no effect for legacy clients.
1911
1916
1912 ``uncompressed``
1917 ``uncompressed``
1913 Whether to allow clients to clone a repository using the
1918 Whether to allow clients to clone a repository using the
1914 uncompressed streaming protocol. This transfers about 40% more
1919 uncompressed streaming protocol. This transfers about 40% more
1915 data than a regular clone, but uses less memory and CPU on both
1920 data than a regular clone, but uses less memory and CPU on both
1916 server and client. Over a LAN (100 Mbps or better) or a very fast
1921 server and client. Over a LAN (100 Mbps or better) or a very fast
1917 WAN, an uncompressed streaming clone is a lot faster (~10x) than a
1922 WAN, an uncompressed streaming clone is a lot faster (~10x) than a
1918 regular clone. Over most WAN connections (anything slower than
1923 regular clone. Over most WAN connections (anything slower than
1919 about 6 Mbps), uncompressed streaming is slower, because of the
1924 about 6 Mbps), uncompressed streaming is slower, because of the
1920 extra data transfer overhead. This mode will also temporarily hold
1925 extra data transfer overhead. This mode will also temporarily hold
1921 the write lock while determining what data to transfer.
1926 the write lock while determining what data to transfer.
1922 (default: True)
1927 (default: True)
1923
1928
1924 ``uncompressedallowsecret``
1929 ``uncompressedallowsecret``
1925 Whether to allow stream clones when the repository contains secret
1930 Whether to allow stream clones when the repository contains secret
1926 changesets. (default: False)
1931 changesets. (default: False)
1927
1932
1928 ``preferuncompressed``
1933 ``preferuncompressed``
1929 When set, clients will try to use the uncompressed streaming
1934 When set, clients will try to use the uncompressed streaming
1930 protocol. (default: False)
1935 protocol. (default: False)
1931
1936
1932 ``disablefullbundle``
1937 ``disablefullbundle``
1933 When set, servers will refuse attempts to do pull-based clones.
1938 When set, servers will refuse attempts to do pull-based clones.
1934 If this option is set, ``preferuncompressed`` and/or clone bundles
1939 If this option is set, ``preferuncompressed`` and/or clone bundles
1935 are highly recommended. Partial clones will still be allowed.
1940 are highly recommended. Partial clones will still be allowed.
1936 (default: False)
1941 (default: False)
1937
1942
1938 ``streamunbundle``
1943 ``streamunbundle``
1939 When set, servers will apply data sent from the client directly,
1944 When set, servers will apply data sent from the client directly,
1940 otherwise it will be written to a temporary file first. This option
1945 otherwise it will be written to a temporary file first. This option
1941 effectively prevents concurrent pushes.
1946 effectively prevents concurrent pushes.
1942
1947
1943 ``pullbundle``
1948 ``pullbundle``
1944 When set, the server will check pullbundle.manifest for bundles
1949 When set, the server will check pullbundle.manifest for bundles
1945 covering the requested heads and common nodes. The first matching
1950 covering the requested heads and common nodes. The first matching
1946 entry will be streamed to the client.
1951 entry will be streamed to the client.
1947
1952
1948 For HTTP transport, the stream will still use zlib compression
1953 For HTTP transport, the stream will still use zlib compression
1949 for older clients.
1954 for older clients.
1950
1955
1951 ``concurrent-push-mode``
1956 ``concurrent-push-mode``
1952 Level of allowed race condition between two pushing clients.
1957 Level of allowed race condition between two pushing clients.
1953
1958
1954 - 'strict': push is aborted if another client touched the repository
1959 - 'strict': push is aborted if another client touched the repository
1955 while the push was being prepared. (default)
1960 while the push was being prepared. (default)
1956 - 'check-related': push is only aborted if it affects a head that was
1961 - 'check-related': push is only aborted if it affects a head that was
1957 also affected while the push was being prepared.
1962 also affected while the push was being prepared.
1958
1963
1959 This requires a compatible client (version 4.3 and later). Old clients
1964 This requires a compatible client (version 4.3 and later). Old clients
1960 will use 'strict'.
1965 will use 'strict'.
1961
1966
1962 ``validate``
1967 ``validate``
1963 Whether to validate the completeness of pushed changesets by
1968 Whether to validate the completeness of pushed changesets by
1964 checking that all new file revisions specified in manifests are
1969 checking that all new file revisions specified in manifests are
1965 present. (default: False)
1970 present. (default: False)
1966
1971
1967 ``maxhttpheaderlen``
1972 ``maxhttpheaderlen``
1968 Instruct HTTP clients not to send request headers longer than this
1973 Instruct HTTP clients not to send request headers longer than this
1969 many bytes. (default: 1024)
1974 many bytes. (default: 1024)
1970
1975
1971 ``bundle1``
1976 ``bundle1``
1972 Whether to allow clients to push and pull using the legacy bundle1
1977 Whether to allow clients to push and pull using the legacy bundle1
1973 exchange format. (default: True)
1978 exchange format. (default: True)
1974
1979
1975 ``bundle1gd``
1980 ``bundle1gd``
1976 Like ``bundle1`` but only used if the repository is using the
1981 Like ``bundle1`` but only used if the repository is using the
1977 *generaldelta* storage format. (default: True)
1982 *generaldelta* storage format. (default: True)
1978
1983
1979 ``bundle1.push``
1984 ``bundle1.push``
1980 Whether to allow clients to push using the legacy bundle1 exchange
1985 Whether to allow clients to push using the legacy bundle1 exchange
1981 format. (default: True)
1986 format. (default: True)
1982
1987
1983 ``bundle1gd.push``
1988 ``bundle1gd.push``
1984 Like ``bundle1.push`` but only used if the repository is using the
1989 Like ``bundle1.push`` but only used if the repository is using the
1985 *generaldelta* storage format. (default: True)
1990 *generaldelta* storage format. (default: True)
1986
1991
1987 ``bundle1.pull``
1992 ``bundle1.pull``
1988 Whether to allow clients to pull using the legacy bundle1 exchange
1993 Whether to allow clients to pull using the legacy bundle1 exchange
1989 format. (default: True)
1994 format. (default: True)
1990
1995
1991 ``bundle1gd.pull``
1996 ``bundle1gd.pull``
1992 Like ``bundle1.pull`` but only used if the repository is using the
1997 Like ``bundle1.pull`` but only used if the repository is using the
1993 *generaldelta* storage format. (default: True)
1998 *generaldelta* storage format. (default: True)
1994
1999
1995 Large repositories using the *generaldelta* storage format should
2000 Large repositories using the *generaldelta* storage format should
1996 consider setting this option because converting *generaldelta*
2001 consider setting this option because converting *generaldelta*
1997 repositories to the exchange format required by the bundle1 data
2002 repositories to the exchange format required by the bundle1 data
1998 format can consume a lot of CPU.
2003 format can consume a lot of CPU.
1999
2004
2000 ``bundle2.stream``
2005 ``bundle2.stream``
2001 Whether to allow clients to pull using the bundle2 streaming protocol.
2006 Whether to allow clients to pull using the bundle2 streaming protocol.
2002 (default: True)
2007 (default: True)
2003
2008
2004 ``zliblevel``
2009 ``zliblevel``
2005 Integer between ``-1`` and ``9`` that controls the zlib compression level
2010 Integer between ``-1`` and ``9`` that controls the zlib compression level
2006 for wire protocol commands that send zlib compressed output (notably the
2011 for wire protocol commands that send zlib compressed output (notably the
2007 commands that send repository history data).
2012 commands that send repository history data).
2008
2013
2009 The default (``-1``) uses the default zlib compression level, which is
2014 The default (``-1``) uses the default zlib compression level, which is
2010 likely equivalent to ``6``. ``0`` means no compression. ``9`` means
2015 likely equivalent to ``6``. ``0`` means no compression. ``9`` means
2011 maximum compression.
2016 maximum compression.
2012
2017
2013 Setting this option allows server operators to make trade-offs between
2018 Setting this option allows server operators to make trade-offs between
2014 bandwidth and CPU used. Lowering the compression lowers CPU utilization
2019 bandwidth and CPU used. Lowering the compression lowers CPU utilization
2015 but sends more bytes to clients.
2020 but sends more bytes to clients.
2016
2021
2017 This option only impacts the HTTP server.
2022 This option only impacts the HTTP server.
2018
2023
2019 ``zstdlevel``
2024 ``zstdlevel``
2020 Integer between ``1`` and ``22`` that controls the zstd compression level
2025 Integer between ``1`` and ``22`` that controls the zstd compression level
2021 for wire protocol commands. ``1`` is the minimal amount of compression and
2026 for wire protocol commands. ``1`` is the minimal amount of compression and
2022 ``22`` is the highest amount of compression.
2027 ``22`` is the highest amount of compression.
2023
2028
2024 The default (``3``) should be significantly faster than zlib while likely
2029 The default (``3``) should be significantly faster than zlib while likely
2025 delivering better compression ratios.
2030 delivering better compression ratios.
2026
2031
2027 This option only impacts the HTTP server.
2032 This option only impacts the HTTP server.
2028
2033
2029 See also ``server.zliblevel``.
2034 See also ``server.zliblevel``.
2030
2035
2031 ``view``
2036 ``view``
2032 Repository filter used when exchanging revisions with the peer.
2037 Repository filter used when exchanging revisions with the peer.
2033
2038
2034 The default view (``served``) excludes secret and hidden changesets.
2039 The default view (``served``) excludes secret and hidden changesets.
2035 Another useful value is ``immutable`` (no draft, secret or hidden changesets).
2040 Another useful value is ``immutable`` (no draft, secret or hidden changesets).
2036
2041
2037 ``smtp``
2042 ``smtp``
2038 --------
2043 --------
2039
2044
2040 Configuration for extensions that need to send email messages.
2045 Configuration for extensions that need to send email messages.
2041
2046
2042 ``host``
2047 ``host``
2043 Host name of mail server, e.g. "mail.example.com".
2048 Host name of mail server, e.g. "mail.example.com".
2044
2049
2045 ``port``
2050 ``port``
2046 Optional. Port to connect to on mail server. (default: 465 if
2051 Optional. Port to connect to on mail server. (default: 465 if
2047 ``tls`` is smtps; 25 otherwise)
2052 ``tls`` is smtps; 25 otherwise)
2048
2053
2049 ``tls``
2054 ``tls``
2050 Optional. Method to enable TLS when connecting to mail server: starttls,
2055 Optional. Method to enable TLS when connecting to mail server: starttls,
2051 smtps or none. (default: none)
2056 smtps or none. (default: none)
2052
2057
2053 ``username``
2058 ``username``
2054 Optional. User name for authenticating with the SMTP server.
2059 Optional. User name for authenticating with the SMTP server.
2055 (default: None)
2060 (default: None)
2056
2061
2057 ``password``
2062 ``password``
2058 Optional. Password for authenticating with the SMTP server. If not
2063 Optional. Password for authenticating with the SMTP server. If not
2059 specified, interactive sessions will prompt the user for a
2064 specified, interactive sessions will prompt the user for a
2060 password; non-interactive sessions will fail. (default: None)
2065 password; non-interactive sessions will fail. (default: None)
2061
2066
2062 ``local_hostname``
2067 ``local_hostname``
2063 Optional. The hostname that the sender can use to identify
2068 Optional. The hostname that the sender can use to identify
2064 itself to the MTA.
2069 itself to the MTA.
2065
2070
2066
2071
2067 ``subpaths``
2072 ``subpaths``
2068 ------------
2073 ------------
2069
2074
2070 Subrepository source URLs can go stale if a remote server changes name
2075 Subrepository source URLs can go stale if a remote server changes name
2071 or becomes temporarily unavailable. This section lets you define
2076 or becomes temporarily unavailable. This section lets you define
2072 rewrite rules of the form::
2077 rewrite rules of the form::
2073
2078
2074 <pattern> = <replacement>
2079 <pattern> = <replacement>
2075
2080
2076 where ``pattern`` is a regular expression matching a subrepository
2081 where ``pattern`` is a regular expression matching a subrepository
2077 source URL and ``replacement`` is the replacement string used to
2082 source URL and ``replacement`` is the replacement string used to
2078 rewrite it. Groups can be matched in ``pattern`` and referenced in
2083 rewrite it. Groups can be matched in ``pattern`` and referenced in
2079 ``replacements``. For instance::
2084 ``replacements``. For instance::
2080
2085
2081 http://server/(.*)-hg/ = http://hg.server/\1/
2086 http://server/(.*)-hg/ = http://hg.server/\1/
2082
2087
2083 rewrites ``http://server/foo-hg/`` into ``http://hg.server/foo/``.
2088 rewrites ``http://server/foo-hg/`` into ``http://hg.server/foo/``.
2084
2089
2085 Relative subrepository paths are first made absolute, and the
2090 Relative subrepository paths are first made absolute, and the
2086 rewrite rules are then applied on the full (absolute) path. If ``pattern``
2091 rewrite rules are then applied on the full (absolute) path. If ``pattern``
2087 doesn't match the full path, an attempt is made to apply it on the
2092 doesn't match the full path, an attempt is made to apply it on the
2088 relative path alone. The rules are applied in definition order.
2093 relative path alone. The rules are applied in definition order.
2089
2094
2090 ``subrepos``
2095 ``subrepos``
2091 ------------
2096 ------------
2092
2097
2093 This section contains options that control the behavior of the
2098 This section contains options that control the behavior of the
2094 subrepositories feature. See also :hg:`help subrepos`.
2099 subrepositories feature. See also :hg:`help subrepos`.
2095
2100
2096 Security note: auditing in Mercurial is known to be insufficient to
2101 Security note: auditing in Mercurial is known to be insufficient to
2097 prevent clone-time code execution with carefully constructed Git
2102 prevent clone-time code execution with carefully constructed Git
2098 subrepos. It is unknown if a similar defect is present in Subversion
2103 subrepos. It is unknown if a similar defect is present in Subversion
2099 subrepos. Both Git and Subversion subrepos are disabled by default
2104 subrepos. Both Git and Subversion subrepos are disabled by default
2100 out of security concerns. These subrepo types can be enabled using
2105 out of security concerns. These subrepo types can be enabled using
2101 the respective options below.
2106 the respective options below.
2102
2107
2103 ``allowed``
2108 ``allowed``
2104 Whether subrepositories are allowed in the working directory.
2109 Whether subrepositories are allowed in the working directory.
2105
2110
2106 When false, commands involving subrepositories (like :hg:`update`)
2111 When false, commands involving subrepositories (like :hg:`update`)
2107 will fail for all subrepository types.
2112 will fail for all subrepository types.
2108 (default: true)
2113 (default: true)
2109
2114
2110 ``hg:allowed``
2115 ``hg:allowed``
2111 Whether Mercurial subrepositories are allowed in the working
2116 Whether Mercurial subrepositories are allowed in the working
2112 directory. This option only has an effect if ``subrepos.allowed``
2117 directory. This option only has an effect if ``subrepos.allowed``
2113 is true.
2118 is true.
2114 (default: true)
2119 (default: true)
2115
2120
2116 ``git:allowed``
2121 ``git:allowed``
2117 Whether Git subrepositories are allowed in the working directory.
2122 Whether Git subrepositories are allowed in the working directory.
2118 This option only has an effect if ``subrepos.allowed`` is true.
2123 This option only has an effect if ``subrepos.allowed`` is true.
2119
2124
2120 See the security note above before enabling Git subrepos.
2125 See the security note above before enabling Git subrepos.
2121 (default: false)
2126 (default: false)
2122
2127
2123 ``svn:allowed``
2128 ``svn:allowed``
2124 Whether Subversion subrepositories are allowed in the working
2129 Whether Subversion subrepositories are allowed in the working
2125 directory. This option only has an effect if ``subrepos.allowed``
2130 directory. This option only has an effect if ``subrepos.allowed``
2126 is true.
2131 is true.
2127
2132
2128 See the security note above before enabling Subversion subrepos.
2133 See the security note above before enabling Subversion subrepos.
2129 (default: false)
2134 (default: false)
2130
2135
2131 ``templatealias``
2136 ``templatealias``
2132 -----------------
2137 -----------------
2133
2138
2134 Alias definitions for templates. See :hg:`help templates` for details.
2139 Alias definitions for templates. See :hg:`help templates` for details.
2135
2140
2136 ``templates``
2141 ``templates``
2137 -------------
2142 -------------
2138
2143
2139 Use the ``[templates]`` section to define template strings.
2144 Use the ``[templates]`` section to define template strings.
2140 See :hg:`help templates` for details.
2145 See :hg:`help templates` for details.
2141
2146
2142 ``trusted``
2147 ``trusted``
2143 -----------
2148 -----------
2144
2149
2145 Mercurial will not use the settings in the
2150 Mercurial will not use the settings in the
2146 ``.hg/hgrc`` file from a repository if it doesn't belong to a trusted
2151 ``.hg/hgrc`` file from a repository if it doesn't belong to a trusted
2147 user or to a trusted group, as various hgrc features allow arbitrary
2152 user or to a trusted group, as various hgrc features allow arbitrary
2148 commands to be run. This issue is often encountered when configuring
2153 commands to be run. This issue is often encountered when configuring
2149 hooks or extensions for shared repositories or servers. However,
2154 hooks or extensions for shared repositories or servers. However,
2150 the web interface will use some safe settings from the ``[web]``
2155 the web interface will use some safe settings from the ``[web]``
2151 section.
2156 section.
2152
2157
2153 This section specifies what users and groups are trusted. The
2158 This section specifies what users and groups are trusted. The
2154 current user is always trusted. To trust everybody, list a user or a
2159 current user is always trusted. To trust everybody, list a user or a
2155 group with name ``*``. These settings must be placed in an
2160 group with name ``*``. These settings must be placed in an
2156 *already-trusted file* to take effect, such as ``$HOME/.hgrc`` of the
2161 *already-trusted file* to take effect, such as ``$HOME/.hgrc`` of the
2157 user or service running Mercurial.
2162 user or service running Mercurial.
2158
2163
2159 ``users``
2164 ``users``
2160 Comma-separated list of trusted users.
2165 Comma-separated list of trusted users.
2161
2166
2162 ``groups``
2167 ``groups``
2163 Comma-separated list of trusted groups.
2168 Comma-separated list of trusted groups.
2164
2169
2165
2170
2166 ``ui``
2171 ``ui``
2167 ------
2172 ------
2168
2173
2169 User interface controls.
2174 User interface controls.
2170
2175
2171 ``archivemeta``
2176 ``archivemeta``
2172 Whether to include the .hg_archival.txt file containing meta data
2177 Whether to include the .hg_archival.txt file containing meta data
2173 (hashes for the repository base and for tip) in archives created
2178 (hashes for the repository base and for tip) in archives created
2174 by the :hg:`archive` command or downloaded via hgweb.
2179 by the :hg:`archive` command or downloaded via hgweb.
2175 (default: True)
2180 (default: True)
2176
2181
2177 ``askusername``
2182 ``askusername``
2178 Whether to prompt for a username when committing. If True, and
2183 Whether to prompt for a username when committing. If True, and
2179 neither ``$HGUSER`` nor ``$EMAIL`` has been specified, then the user will
2184 neither ``$HGUSER`` nor ``$EMAIL`` has been specified, then the user will
2180 be prompted to enter a username. If no username is entered, the
2185 be prompted to enter a username. If no username is entered, the
2181 default ``USER@HOST`` is used instead.
2186 default ``USER@HOST`` is used instead.
2182 (default: False)
2187 (default: False)
2183
2188
2184 ``clonebundles``
2189 ``clonebundles``
2185 Whether the "clone bundles" feature is enabled.
2190 Whether the "clone bundles" feature is enabled.
2186
2191
2187 When enabled, :hg:`clone` may download and apply a server-advertised
2192 When enabled, :hg:`clone` may download and apply a server-advertised
2188 bundle file from a URL instead of using the normal exchange mechanism.
2193 bundle file from a URL instead of using the normal exchange mechanism.
2189
2194
2190 This can likely result in faster and more reliable clones.
2195 This can likely result in faster and more reliable clones.
2191
2196
2192 (default: True)
2197 (default: True)
2193
2198
2194 ``clonebundlefallback``
2199 ``clonebundlefallback``
2195 Whether failure to apply an advertised "clone bundle" from a server
2200 Whether failure to apply an advertised "clone bundle" from a server
2196 should result in fallback to a regular clone.
2201 should result in fallback to a regular clone.
2197
2202
2198 This is disabled by default because servers advertising "clone
2203 This is disabled by default because servers advertising "clone
2199 bundles" often do so to reduce server load. If advertised bundles
2204 bundles" often do so to reduce server load. If advertised bundles
2200 start mass failing and clients automatically fall back to a regular
2205 start mass failing and clients automatically fall back to a regular
2201 clone, this would add significant and unexpected load to the server
2206 clone, this would add significant and unexpected load to the server
2202 since the server is expecting clone operations to be offloaded to
2207 since the server is expecting clone operations to be offloaded to
2203 pre-generated bundles. Failing fast (the default behavior) ensures
2208 pre-generated bundles. Failing fast (the default behavior) ensures
2204 clients don't overwhelm the server when "clone bundle" application
2209 clients don't overwhelm the server when "clone bundle" application
2205 fails.
2210 fails.
2206
2211
2207 (default: False)
2212 (default: False)
2208
2213
2209 ``clonebundleprefers``
2214 ``clonebundleprefers``
2210 Defines preferences for which "clone bundles" to use.
2215 Defines preferences for which "clone bundles" to use.
2211
2216
2212 Servers advertising "clone bundles" may advertise multiple available
2217 Servers advertising "clone bundles" may advertise multiple available
2213 bundles. Each bundle may have different attributes, such as the bundle
2218 bundles. Each bundle may have different attributes, such as the bundle
2214 type and compression format. This option is used to prefer a particular
2219 type and compression format. This option is used to prefer a particular
2215 bundle over another.
2220 bundle over another.
2216
2221
2217 The following keys are defined by Mercurial:
2222 The following keys are defined by Mercurial:
2218
2223
2219 BUNDLESPEC
2224 BUNDLESPEC
2220 A bundle type specifier. These are strings passed to :hg:`bundle -t`.
2225 A bundle type specifier. These are strings passed to :hg:`bundle -t`.
2221 e.g. ``gzip-v2`` or ``bzip2-v1``.
2226 e.g. ``gzip-v2`` or ``bzip2-v1``.
2222
2227
2223 COMPRESSION
2228 COMPRESSION
2224 The compression format of the bundle. e.g. ``gzip`` and ``bzip2``.
2229 The compression format of the bundle. e.g. ``gzip`` and ``bzip2``.
2225
2230
2226 Server operators may define custom keys.
2231 Server operators may define custom keys.
2227
2232
2228 Example values: ``COMPRESSION=bzip2``,
2233 Example values: ``COMPRESSION=bzip2``,
2229 ``BUNDLESPEC=gzip-v2, COMPRESSION=gzip``.
2234 ``BUNDLESPEC=gzip-v2, COMPRESSION=gzip``.
2230
2235
2231 By default, the first bundle advertised by the server is used.
2236 By default, the first bundle advertised by the server is used.
2232
2237
2233 ``color``
2238 ``color``
2234 When to colorize output. Possible values are Boolean ("yes" or "no"), or
2239 When to colorize output. Possible values are Boolean ("yes" or "no"), or
2235 "debug", or "always". (default: "yes"). "yes" will use color whenever it
2240 "debug", or "always". (default: "yes"). "yes" will use color whenever it
2236 seems possible. See :hg:`help color` for details.
2241 seems possible. See :hg:`help color` for details.
2237
2242
2238 ``commitsubrepos``
2243 ``commitsubrepos``
2239 Whether to commit modified subrepositories when committing the
2244 Whether to commit modified subrepositories when committing the
2240 parent repository. If False and one subrepository has uncommitted
2245 parent repository. If False and one subrepository has uncommitted
2241 changes, abort the commit.
2246 changes, abort the commit.
2242 (default: False)
2247 (default: False)
2243
2248
2244 ``debug``
2249 ``debug``
2245 Print debugging information. (default: False)
2250 Print debugging information. (default: False)
2246
2251
2247 ``editor``
2252 ``editor``
2248 The editor to use during a commit. (default: ``$EDITOR`` or ``vi``)
2253 The editor to use during a commit. (default: ``$EDITOR`` or ``vi``)
2249
2254
2250 ``fallbackencoding``
2255 ``fallbackencoding``
2251 Encoding to try if it's not possible to decode the changelog using
2256 Encoding to try if it's not possible to decode the changelog using
2252 UTF-8. (default: ISO-8859-1)
2257 UTF-8. (default: ISO-8859-1)
2253
2258
2254 ``graphnodetemplate``
2259 ``graphnodetemplate``
2255 The template used to print changeset nodes in an ASCII revision graph.
2260 The template used to print changeset nodes in an ASCII revision graph.
2256 (default: ``{graphnode}``)
2261 (default: ``{graphnode}``)
2257
2262
2258 ``ignore``
2263 ``ignore``
2259 A file to read per-user ignore patterns from. This file should be
2264 A file to read per-user ignore patterns from. This file should be
2260 in the same format as a repository-wide .hgignore file. Filenames
2265 in the same format as a repository-wide .hgignore file. Filenames
2261 are relative to the repository root. This option supports hook syntax,
2266 are relative to the repository root. This option supports hook syntax,
2262 so if you want to specify multiple ignore files, you can do so by
2267 so if you want to specify multiple ignore files, you can do so by
2263 setting something like ``ignore.other = ~/.hgignore2``. For details
2268 setting something like ``ignore.other = ~/.hgignore2``. For details
2264 of the ignore file format, see the ``hgignore(5)`` man page.
2269 of the ignore file format, see the ``hgignore(5)`` man page.
2265
2270
2266 ``interactive``
2271 ``interactive``
2267 Allow to prompt the user. (default: True)
2272 Allow to prompt the user. (default: True)
2268
2273
2269 ``interface``
2274 ``interface``
2270 Select the default interface for interactive features (default: text).
2275 Select the default interface for interactive features (default: text).
2271 Possible values are 'text' and 'curses'.
2276 Possible values are 'text' and 'curses'.
2272
2277
2273 ``interface.chunkselector``
2278 ``interface.chunkselector``
2274 Select the interface for change recording (e.g. :hg:`commit -i`).
2279 Select the interface for change recording (e.g. :hg:`commit -i`).
2275 Possible values are 'text' and 'curses'.
2280 Possible values are 'text' and 'curses'.
2276 This config overrides the interface specified by ui.interface.
2281 This config overrides the interface specified by ui.interface.
2277
2282
2278 ``large-file-limit``
2283 ``large-file-limit``
2279 Largest file size that gives no memory use warning.
2284 Largest file size that gives no memory use warning.
2280 Possible values are integers or 0 to disable the check.
2285 Possible values are integers or 0 to disable the check.
2281 (default: 10000000)
2286 (default: 10000000)
2282
2287
2283 ``logtemplate``
2288 ``logtemplate``
2284 Template string for commands that print changesets.
2289 Template string for commands that print changesets.
2285
2290
2286 ``merge``
2291 ``merge``
2287 The conflict resolution program to use during a manual merge.
2292 The conflict resolution program to use during a manual merge.
2288 For more information on merge tools see :hg:`help merge-tools`.
2293 For more information on merge tools see :hg:`help merge-tools`.
2289 For configuring merge tools see the ``[merge-tools]`` section.
2294 For configuring merge tools see the ``[merge-tools]`` section.
2290
2295
2291 ``mergemarkers``
2296 ``mergemarkers``
2292 Sets the merge conflict marker label styling. The ``detailed``
2297 Sets the merge conflict marker label styling. The ``detailed``
2293 style uses the ``mergemarkertemplate`` setting to style the labels.
2298 style uses the ``mergemarkertemplate`` setting to style the labels.
2294 The ``basic`` style just uses 'local' and 'other' as the marker label.
2299 The ``basic`` style just uses 'local' and 'other' as the marker label.
2295 One of ``basic`` or ``detailed``.
2300 One of ``basic`` or ``detailed``.
2296 (default: ``basic``)
2301 (default: ``basic``)
2297
2302
2298 ``mergemarkertemplate``
2303 ``mergemarkertemplate``
2299 The template used to print the commit description next to each conflict
2304 The template used to print the commit description next to each conflict
2300 marker during merge conflicts. See :hg:`help templates` for the template
2305 marker during merge conflicts. See :hg:`help templates` for the template
2301 format.
2306 format.
2302
2307
2303 Defaults to showing the hash, tags, branches, bookmarks, author, and
2308 Defaults to showing the hash, tags, branches, bookmarks, author, and
2304 the first line of the commit description.
2309 the first line of the commit description.
2305
2310
2306 If you use non-ASCII characters in names for tags, branches, bookmarks,
2311 If you use non-ASCII characters in names for tags, branches, bookmarks,
2307 authors, and/or commit descriptions, you must pay attention to encodings of
2312 authors, and/or commit descriptions, you must pay attention to encodings of
2308 managed files. At template expansion, non-ASCII characters use the encoding
2313 managed files. At template expansion, non-ASCII characters use the encoding
2309 specified by the ``--encoding`` global option, ``HGENCODING`` or other
2314 specified by the ``--encoding`` global option, ``HGENCODING`` or other
2310 environment variables that govern your locale. If the encoding of the merge
2315 environment variables that govern your locale. If the encoding of the merge
2311 markers is different from the encoding of the merged files,
2316 markers is different from the encoding of the merged files,
2312 serious problems may occur.
2317 serious problems may occur.
2313
2318
2314 Can be overridden per-merge-tool, see the ``[merge-tools]`` section.
2319 Can be overridden per-merge-tool, see the ``[merge-tools]`` section.
2315
2320
2316 ``message-output``
2321 ``message-output``
2317 Where to write status and error messages. (default: ``stdio``)
2322 Where to write status and error messages. (default: ``stdio``)
2318
2323
2319 ``stderr``
2324 ``stderr``
2320 Everything to stderr.
2325 Everything to stderr.
2321 ``stdio``
2326 ``stdio``
2322 Status to stdout, and error to stderr.
2327 Status to stdout, and error to stderr.
2323
2328
2324 ``origbackuppath``
2329 ``origbackuppath``
2325 The path to a directory used to store generated .orig files. If the path is
2330 The path to a directory used to store generated .orig files. If the path is
2326 not a directory, one will be created. If set, files stored in this
2331 not a directory, one will be created. If set, files stored in this
2327 directory have the same name as the original file and do not have a .orig
2332 directory have the same name as the original file and do not have a .orig
2328 suffix.
2333 suffix.
2329
2334
2330 ``paginate``
2335 ``paginate``
2331 Control the pagination of command output (default: True). See :hg:`help pager`
2336 Control the pagination of command output (default: True). See :hg:`help pager`
2332 for details.
2337 for details.
2333
2338
2334 ``patch``
2339 ``patch``
2335 An optional external tool that ``hg import`` and some extensions
2340 An optional external tool that ``hg import`` and some extensions
2336 will use for applying patches. By default Mercurial uses an
2341 will use for applying patches. By default Mercurial uses an
2337 internal patch utility. The external tool must work as the common
2342 internal patch utility. The external tool must work as the common
2338 Unix ``patch`` program. In particular, it must accept a ``-p``
2343 Unix ``patch`` program. In particular, it must accept a ``-p``
2339 argument to strip patch headers, a ``-d`` argument to specify the
2344 argument to strip patch headers, a ``-d`` argument to specify the
2340 current directory, a file name to patch, and a patch file to take
2345 current directory, a file name to patch, and a patch file to take
2341 from stdin.
2346 from stdin.
2342
2347
2343 It is possible to specify a patch tool together with extra
2348 It is possible to specify a patch tool together with extra
2344 arguments. For example, setting this option to ``patch --merge``
2349 arguments. For example, setting this option to ``patch --merge``
2345 will use the ``patch`` program with its 2-way merge option.
2350 will use the ``patch`` program with its 2-way merge option.
2346
2351
2347 ``portablefilenames``
2352 ``portablefilenames``
2348 Check for portable filenames. Can be ``warn``, ``ignore`` or ``abort``.
2353 Check for portable filenames. Can be ``warn``, ``ignore`` or ``abort``.
2349 (default: ``warn``)
2354 (default: ``warn``)
2350
2355
2351 ``warn``
2356 ``warn``
2352 Print a warning message on POSIX platforms, if a file with a non-portable
2357 Print a warning message on POSIX platforms, if a file with a non-portable
2353 filename is added (e.g. a file with a name that can't be created on
2358 filename is added (e.g. a file with a name that can't be created on
2354 Windows because it contains reserved parts like ``AUX``, reserved
2359 Windows because it contains reserved parts like ``AUX``, reserved
2355 characters like ``:``, or would cause a case collision with an existing
2360 characters like ``:``, or would cause a case collision with an existing
2356 file).
2361 file).
2357
2362
2358 ``ignore``
2363 ``ignore``
2359 Don't print a warning.
2364 Don't print a warning.
2360
2365
2361 ``abort``
2366 ``abort``
2362 The command is aborted.
2367 The command is aborted.
2363
2368
2364 ``true``
2369 ``true``
2365 Alias for ``warn``.
2370 Alias for ``warn``.
2366
2371
2367 ``false``
2372 ``false``
2368 Alias for ``ignore``.
2373 Alias for ``ignore``.
2369
2374
2370 .. container:: windows
2375 .. container:: windows
2371
2376
2372 On Windows, this configuration option is ignored and the command aborted.
2377 On Windows, this configuration option is ignored and the command aborted.
2373
2378
2374 ``pre-merge-tool-output-template``
2379 ``pre-merge-tool-output-template``
2375 A template that is printed before executing an external merge tool. This can
2380 A template that is printed before executing an external merge tool. This can
2376 be used to print out additional context that might be useful to have during
2381 be used to print out additional context that might be useful to have during
2377 the conflict resolution, such as the description of the various commits
2382 the conflict resolution, such as the description of the various commits
2378 involved or bookmarks/tags.
2383 involved or bookmarks/tags.
2379
2384
2380 Additional information is available in the ``local``, ``base``, and ``other``
2385 Additional information is available in the ``local``, ``base``, and ``other``
2381 dicts. For example: ``{local.label}``, ``{base.name}``, or
2386 dicts. For example: ``{local.label}``, ``{base.name}``, or
2382 ``{other.islink}``.
2387 ``{other.islink}``.
2383
2388
2384 ``quiet``
2389 ``quiet``
2385 Reduce the amount of output printed.
2390 Reduce the amount of output printed.
2386 (default: False)
2391 (default: False)
2387
2392
2388 ``relative-paths``
2393 ``relative-paths``
2389 Prefer relative paths in the UI.
2394 Prefer relative paths in the UI.
2390
2395
2391 ``remotecmd``
2396 ``remotecmd``
2392 Remote command to use for clone/push/pull operations.
2397 Remote command to use for clone/push/pull operations.
2393 (default: ``hg``)
2398 (default: ``hg``)
2394
2399
2395 ``report_untrusted``
2400 ``report_untrusted``
2396 Warn if a ``.hg/hgrc`` file is ignored due to not being owned by a
2401 Warn if a ``.hg/hgrc`` file is ignored due to not being owned by a
2397 trusted user or group.
2402 trusted user or group.
2398 (default: True)
2403 (default: True)
2399
2404
2400 ``slash``
2405 ``slash``
2401 (Deprecated. Use ``slashpath`` template filter instead.)
2406 (Deprecated. Use ``slashpath`` template filter instead.)
2402
2407
2403 Display paths using a slash (``/``) as the path separator. This
2408 Display paths using a slash (``/``) as the path separator. This
2404 only makes a difference on systems where the default path
2409 only makes a difference on systems where the default path
2405 separator is not the slash character (e.g. Windows uses the
2410 separator is not the slash character (e.g. Windows uses the
2406 backslash character (``\``)).
2411 backslash character (``\``)).
2407 (default: False)
2412 (default: False)
2408
2413
2409 ``statuscopies``
2414 ``statuscopies``
2410 Display copies in the status command.
2415 Display copies in the status command.
2411
2416
2412 ``ssh``
2417 ``ssh``
2413 Command to use for SSH connections. (default: ``ssh``)
2418 Command to use for SSH connections. (default: ``ssh``)
2414
2419
2415 ``ssherrorhint``
2420 ``ssherrorhint``
2416 A hint shown to the user in the case of SSH error (e.g.
2421 A hint shown to the user in the case of SSH error (e.g.
2417 ``Please see http://company/internalwiki/ssh.html``)
2422 ``Please see http://company/internalwiki/ssh.html``)
2418
2423
2419 ``strict``
2424 ``strict``
2420 Require exact command names, instead of allowing unambiguous
2425 Require exact command names, instead of allowing unambiguous
2421 abbreviations. (default: False)
2426 abbreviations. (default: False)
2422
2427
2423 ``style``
2428 ``style``
2424 Name of style to use for command output.
2429 Name of style to use for command output.
2425
2430
2426 ``supportcontact``
2431 ``supportcontact``
2427 A URL where users should report a Mercurial traceback. Use this if you are a
2432 A URL where users should report a Mercurial traceback. Use this if you are a
2428 large organisation with its own Mercurial deployment process and crash
2433 large organisation with its own Mercurial deployment process and crash
2429 reports should be addressed to your internal support.
2434 reports should be addressed to your internal support.
2430
2435
2431 ``textwidth``
2436 ``textwidth``
2432 Maximum width of help text. A longer line generated by ``hg help`` or
2437 Maximum width of help text. A longer line generated by ``hg help`` or
2433 ``hg subcommand --help`` will be broken after white space to get this
2438 ``hg subcommand --help`` will be broken after white space to get this
2434 width or the terminal width, whichever comes first.
2439 width or the terminal width, whichever comes first.
2435 A non-positive value will disable this and the terminal width will be
2440 A non-positive value will disable this and the terminal width will be
2436 used. (default: 78)
2441 used. (default: 78)
2437
2442
2438 ``timeout``
2443 ``timeout``
2439 The timeout used when a lock is held (in seconds), a negative value
2444 The timeout used when a lock is held (in seconds), a negative value
2440 means no timeout. (default: 600)
2445 means no timeout. (default: 600)
2441
2446
2442 ``timeout.warn``
2447 ``timeout.warn``
2443 Time (in seconds) before a warning is printed about held lock. A negative
2448 Time (in seconds) before a warning is printed about held lock. A negative
2444 value means no warning. (default: 0)
2449 value means no warning. (default: 0)
2445
2450
2446 ``traceback``
2451 ``traceback``
2447 Mercurial always prints a traceback when an unknown exception
2452 Mercurial always prints a traceback when an unknown exception
2448 occurs. Setting this to True will make Mercurial print a traceback
2453 occurs. Setting this to True will make Mercurial print a traceback
2449 on all exceptions, even those recognized by Mercurial (such as
2454 on all exceptions, even those recognized by Mercurial (such as
2450 IOError or MemoryError). (default: False)
2455 IOError or MemoryError). (default: False)
2451
2456
2452 ``tweakdefaults``
2457 ``tweakdefaults``
2453
2458
2454 By default Mercurial's behavior changes very little from release
2459 By default Mercurial's behavior changes very little from release
2455 to release, but over time the recommended config settings
2460 to release, but over time the recommended config settings
2456 shift. Enable this config to opt in to get automatic tweaks to
2461 shift. Enable this config to opt in to get automatic tweaks to
2457 Mercurial's behavior over time. This config setting will have no
2462 Mercurial's behavior over time. This config setting will have no
2458 effect if ``HGPLAIN`` is set or ``HGPLAINEXCEPT`` is set and does
2463 effect if ``HGPLAIN`` is set or ``HGPLAINEXCEPT`` is set and does
2459 not include ``tweakdefaults``. (default: False)
2464 not include ``tweakdefaults``. (default: False)
2460
2465
2461 It currently means::
2466 It currently means::
2462
2467
2463 .. tweakdefaultsmarker
2468 .. tweakdefaultsmarker
2464
2469
2465 ``username``
2470 ``username``
2466 The committer of a changeset created when running "commit".
2471 The committer of a changeset created when running "commit".
2467 Typically a person's name and email address, e.g. ``Fred Widget
2472 Typically a person's name and email address, e.g. ``Fred Widget
2468 <fred@example.com>``. Environment variables in the
2473 <fred@example.com>``. Environment variables in the
2469 username are expanded.
2474 username are expanded.
2470
2475
2471 (default: ``$EMAIL`` or ``username@hostname``. If the username in
2476 (default: ``$EMAIL`` or ``username@hostname``. If the username in
2472 hgrc is empty, e.g. if the system admin set ``username =`` in the
2477 hgrc is empty, e.g. if the system admin set ``username =`` in the
2473 system hgrc, it has to be specified manually or in a different
2478 system hgrc, it has to be specified manually or in a different
2474 hgrc file)
2479 hgrc file)
2475
2480
2476 ``verbose``
2481 ``verbose``
2477 Increase the amount of output printed. (default: False)
2482 Increase the amount of output printed. (default: False)
2478
2483
2479
2484
2480 ``web``
2485 ``web``
2481 -------
2486 -------
2482
2487
2483 Web interface configuration. The settings in this section apply to
2488 Web interface configuration. The settings in this section apply to
2484 both the builtin webserver (started by :hg:`serve`) and the script you
2489 both the builtin webserver (started by :hg:`serve`) and the script you
2485 run through a webserver (``hgweb.cgi`` and the derivatives for FastCGI
2490 run through a webserver (``hgweb.cgi`` and the derivatives for FastCGI
2486 and WSGI).
2491 and WSGI).
2487
2492
2488 The Mercurial webserver does no authentication (it does not prompt for
2493 The Mercurial webserver does no authentication (it does not prompt for
2489 usernames and passwords to validate *who* users are), but it does do
2494 usernames and passwords to validate *who* users are), but it does do
2490 authorization (it grants or denies access for *authenticated users*
2495 authorization (it grants or denies access for *authenticated users*
2491 based on settings in this section). You must either configure your
2496 based on settings in this section). You must either configure your
2492 webserver to do authentication for you, or disable the authorization
2497 webserver to do authentication for you, or disable the authorization
2493 checks.
2498 checks.
2494
2499
2495 For a quick setup in a trusted environment, e.g., a private LAN, where
2500 For a quick setup in a trusted environment, e.g., a private LAN, where
2496 you want it to accept pushes from anybody, you can use the following
2501 you want it to accept pushes from anybody, you can use the following
2497 command line::
2502 command line::
2498
2503
2499 $ hg --config web.allow-push=* --config web.push_ssl=False serve
2504 $ hg --config web.allow-push=* --config web.push_ssl=False serve
2500
2505
2501 Note that this will allow anybody to push anything to the server and
2506 Note that this will allow anybody to push anything to the server and
2502 that this should not be used for public servers.
2507 that this should not be used for public servers.
2503
2508
2504 The full set of options is:
2509 The full set of options is:
2505
2510
2506 ``accesslog``
2511 ``accesslog``
2507 Where to output the access log. (default: stdout)
2512 Where to output the access log. (default: stdout)
2508
2513
2509 ``address``
2514 ``address``
2510 Interface address to bind to. (default: all)
2515 Interface address to bind to. (default: all)
2511
2516
2512 ``allow-archive``
2517 ``allow-archive``
2513 List of archive format (bz2, gz, zip) allowed for downloading.
2518 List of archive format (bz2, gz, zip) allowed for downloading.
2514 (default: empty)
2519 (default: empty)
2515
2520
2516 ``allowbz2``
2521 ``allowbz2``
2517 (DEPRECATED) Whether to allow .tar.bz2 downloading of repository
2522 (DEPRECATED) Whether to allow .tar.bz2 downloading of repository
2518 revisions.
2523 revisions.
2519 (default: False)
2524 (default: False)
2520
2525
2521 ``allowgz``
2526 ``allowgz``
2522 (DEPRECATED) Whether to allow .tar.gz downloading of repository
2527 (DEPRECATED) Whether to allow .tar.gz downloading of repository
2523 revisions.
2528 revisions.
2524 (default: False)
2529 (default: False)
2525
2530
2526 ``allow-pull``
2531 ``allow-pull``
2527 Whether to allow pulling from the repository. (default: True)
2532 Whether to allow pulling from the repository. (default: True)
2528
2533
2529 ``allow-push``
2534 ``allow-push``
2530 Whether to allow pushing to the repository. If empty or not set,
2535 Whether to allow pushing to the repository. If empty or not set,
2531 pushing is not allowed. If the special value ``*``, any remote
2536 pushing is not allowed. If the special value ``*``, any remote
2532 user can push, including unauthenticated users. Otherwise, the
2537 user can push, including unauthenticated users. Otherwise, the
2533 remote user must have been authenticated, and the authenticated
2538 remote user must have been authenticated, and the authenticated
2534 user name must be present in this list. The contents of the
2539 user name must be present in this list. The contents of the
2535 allow-push list are examined after the deny_push list.
2540 allow-push list are examined after the deny_push list.
2536
2541
2537 ``allow_read``
2542 ``allow_read``
2538 If the user has not already been denied repository access due to
2543 If the user has not already been denied repository access due to
2539 the contents of deny_read, this list determines whether to grant
2544 the contents of deny_read, this list determines whether to grant
2540 repository access to the user. If this list is not empty, and the
2545 repository access to the user. If this list is not empty, and the
2541 user is unauthenticated or not present in the list, then access is
2546 user is unauthenticated or not present in the list, then access is
2542 denied for the user. If the list is empty or not set, then access
2547 denied for the user. If the list is empty or not set, then access
2543 is permitted to all users by default. Setting allow_read to the
2548 is permitted to all users by default. Setting allow_read to the
2544 special value ``*`` is equivalent to it not being set (i.e. access
2549 special value ``*`` is equivalent to it not being set (i.e. access
2545 is permitted to all users). The contents of the allow_read list are
2550 is permitted to all users). The contents of the allow_read list are
2546 examined after the deny_read list.
2551 examined after the deny_read list.
2547
2552
2548 ``allowzip``
2553 ``allowzip``
2549 (DEPRECATED) Whether to allow .zip downloading of repository
2554 (DEPRECATED) Whether to allow .zip downloading of repository
2550 revisions. This feature creates temporary files.
2555 revisions. This feature creates temporary files.
2551 (default: False)
2556 (default: False)
2552
2557
2553 ``archivesubrepos``
2558 ``archivesubrepos``
2554 Whether to recurse into subrepositories when archiving.
2559 Whether to recurse into subrepositories when archiving.
2555 (default: False)
2560 (default: False)
2556
2561
2557 ``baseurl``
2562 ``baseurl``
2558 Base URL to use when publishing URLs in other locations, so
2563 Base URL to use when publishing URLs in other locations, so
2559 third-party tools like email notification hooks can construct
2564 third-party tools like email notification hooks can construct
2560 URLs. Example: ``http://hgserver/repos/``.
2565 URLs. Example: ``http://hgserver/repos/``.
2561
2566
2562 ``cacerts``
2567 ``cacerts``
2563 Path to file containing a list of PEM encoded certificate
2568 Path to file containing a list of PEM encoded certificate
2564 authority certificates. Environment variables and ``~user``
2569 authority certificates. Environment variables and ``~user``
2565 constructs are expanded in the filename. If specified on the
2570 constructs are expanded in the filename. If specified on the
2566 client, then it will verify the identity of remote HTTPS servers
2571 client, then it will verify the identity of remote HTTPS servers
2567 with these certificates.
2572 with these certificates.
2568
2573
2569 To disable SSL verification temporarily, specify ``--insecure`` from
2574 To disable SSL verification temporarily, specify ``--insecure`` from
2570 command line.
2575 command line.
2571
2576
2572 You can use OpenSSL's CA certificate file if your platform has
2577 You can use OpenSSL's CA certificate file if your platform has
2573 one. On most Linux systems this will be
2578 one. On most Linux systems this will be
2574 ``/etc/ssl/certs/ca-certificates.crt``. Otherwise you will have to
2579 ``/etc/ssl/certs/ca-certificates.crt``. Otherwise you will have to
2575 generate this file manually. The form must be as follows::
2580 generate this file manually. The form must be as follows::
2576
2581
2577 -----BEGIN CERTIFICATE-----
2582 -----BEGIN CERTIFICATE-----
2578 ... (certificate in base64 PEM encoding) ...
2583 ... (certificate in base64 PEM encoding) ...
2579 -----END CERTIFICATE-----
2584 -----END CERTIFICATE-----
2580 -----BEGIN CERTIFICATE-----
2585 -----BEGIN CERTIFICATE-----
2581 ... (certificate in base64 PEM encoding) ...
2586 ... (certificate in base64 PEM encoding) ...
2582 -----END CERTIFICATE-----
2587 -----END CERTIFICATE-----
2583
2588
2584 ``cache``
2589 ``cache``
2585 Whether to support caching in hgweb. (default: True)
2590 Whether to support caching in hgweb. (default: True)
2586
2591
2587 ``certificate``
2592 ``certificate``
2588 Certificate to use when running :hg:`serve`.
2593 Certificate to use when running :hg:`serve`.
2589
2594
2590 ``collapse``
2595 ``collapse``
2591 With ``descend`` enabled, repositories in subdirectories are shown at
2596 With ``descend`` enabled, repositories in subdirectories are shown at
2592 a single level alongside repositories in the current path. With
2597 a single level alongside repositories in the current path. With
2593 ``collapse`` also enabled, repositories residing at a deeper level than
2598 ``collapse`` also enabled, repositories residing at a deeper level than
2594 the current path are grouped behind navigable directory entries that
2599 the current path are grouped behind navigable directory entries that
2595 lead to the locations of these repositories. In effect, this setting
2600 lead to the locations of these repositories. In effect, this setting
2596 collapses each collection of repositories found within a subdirectory
2601 collapses each collection of repositories found within a subdirectory
2597 into a single entry for that subdirectory. (default: False)
2602 into a single entry for that subdirectory. (default: False)
2598
2603
2599 ``comparisoncontext``
2604 ``comparisoncontext``
2600 Number of lines of context to show in side-by-side file comparison. If
2605 Number of lines of context to show in side-by-side file comparison. If
2601 negative or the value ``full``, whole files are shown. (default: 5)
2606 negative or the value ``full``, whole files are shown. (default: 5)
2602
2607
2603 This setting can be overridden by a ``context`` request parameter to the
2608 This setting can be overridden by a ``context`` request parameter to the
2604 ``comparison`` command, taking the same values.
2609 ``comparison`` command, taking the same values.
2605
2610
2606 ``contact``
2611 ``contact``
2607 Name or email address of the person in charge of the repository.
2612 Name or email address of the person in charge of the repository.
2608 (default: ui.username or ``$EMAIL`` or "unknown" if unset or empty)
2613 (default: ui.username or ``$EMAIL`` or "unknown" if unset or empty)
2609
2614
2610 ``csp``
2615 ``csp``
2611 Send a ``Content-Security-Policy`` HTTP header with this value.
2616 Send a ``Content-Security-Policy`` HTTP header with this value.
2612
2617
2613 The value may contain a special string ``%nonce%``, which will be replaced
2618 The value may contain a special string ``%nonce%``, which will be replaced
2614 by a randomly-generated one-time use value. If the value contains
2619 by a randomly-generated one-time use value. If the value contains
2615 ``%nonce%``, ``web.cache`` will be disabled, as caching undermines the
2620 ``%nonce%``, ``web.cache`` will be disabled, as caching undermines the
2616 one-time property of the nonce. This nonce will also be inserted into
2621 one-time property of the nonce. This nonce will also be inserted into
2617 ``<script>`` elements containing inline JavaScript.
2622 ``<script>`` elements containing inline JavaScript.
2618
2623
2619 Note: lots of HTML content sent by the server is derived from repository
2624 Note: lots of HTML content sent by the server is derived from repository
2620 data. Please consider the potential for malicious repository data to
2625 data. Please consider the potential for malicious repository data to
2621 "inject" itself into generated HTML content as part of your security
2626 "inject" itself into generated HTML content as part of your security
2622 threat model.
2627 threat model.
2623
2628
2624 ``deny_push``
2629 ``deny_push``
2625 Whether to deny pushing to the repository. If empty or not set,
2630 Whether to deny pushing to the repository. If empty or not set,
2626 push is not denied. If the special value ``*``, all remote users are
2631 push is not denied. If the special value ``*``, all remote users are
2627 denied push. Otherwise, unauthenticated users are all denied, and
2632 denied push. Otherwise, unauthenticated users are all denied, and
2628 any authenticated user name present in this list is also denied. The
2633 any authenticated user name present in this list is also denied. The
2629 contents of the deny_push list are examined before the allow-push list.
2634 contents of the deny_push list are examined before the allow-push list.
2630
2635
2631 ``deny_read``
2636 ``deny_read``
2632 Whether to deny reading/viewing of the repository. If this list is
2637 Whether to deny reading/viewing of the repository. If this list is
2633 not empty, unauthenticated users are all denied, and any
2638 not empty, unauthenticated users are all denied, and any
2634 authenticated user name present in this list is also denied access to
2639 authenticated user name present in this list is also denied access to
2635 the repository. If set to the special value ``*``, all remote users
2640 the repository. If set to the special value ``*``, all remote users
2636 are denied access (rarely needed ;). If deny_read is empty or not set,
2641 are denied access (rarely needed ;). If deny_read is empty or not set,
2637 the determination of repository access depends on the presence and
2642 the determination of repository access depends on the presence and
2638 content of the allow_read list (see description). If both
2643 content of the allow_read list (see description). If both
2639 deny_read and allow_read are empty or not set, then access is
2644 deny_read and allow_read are empty or not set, then access is
2640 permitted to all users by default. If the repository is being
2645 permitted to all users by default. If the repository is being
2641 served via hgwebdir, denied users will not be able to see it in
2646 served via hgwebdir, denied users will not be able to see it in
2642 the list of repositories. The contents of the deny_read list have
2647 the list of repositories. The contents of the deny_read list have
2643 priority over (are examined before) the contents of the allow_read
2648 priority over (are examined before) the contents of the allow_read
2644 list.
2649 list.
2645
2650
2646 ``descend``
2651 ``descend``
2647 hgwebdir indexes will not descend into subdirectories. Only repositories
2652 hgwebdir indexes will not descend into subdirectories. Only repositories
2648 directly in the current path will be shown (other repositories are still
2653 directly in the current path will be shown (other repositories are still
2649 available from the index corresponding to their containing path).
2654 available from the index corresponding to their containing path).
2650
2655
2651 ``description``
2656 ``description``
2652 Textual description of the repository's purpose or contents.
2657 Textual description of the repository's purpose or contents.
2653 (default: "unknown")
2658 (default: "unknown")
2654
2659
2655 ``encoding``
2660 ``encoding``
2656 Character encoding name. (default: the current locale charset)
2661 Character encoding name. (default: the current locale charset)
2657 Example: "UTF-8".
2662 Example: "UTF-8".
2658
2663
2659 ``errorlog``
2664 ``errorlog``
2660 Where to output the error log. (default: stderr)
2665 Where to output the error log. (default: stderr)
2661
2666
2662 ``guessmime``
2667 ``guessmime``
2663 Control MIME types for raw download of file content.
2668 Control MIME types for raw download of file content.
2664 Set to True to let hgweb guess the content type from the file
2669 Set to True to let hgweb guess the content type from the file
2665 extension. This will serve HTML files as ``text/html`` and might
2670 extension. This will serve HTML files as ``text/html`` and might
2666 allow cross-site scripting attacks when serving untrusted
2671 allow cross-site scripting attacks when serving untrusted
2667 repositories. (default: False)
2672 repositories. (default: False)
2668
2673
2669 ``hidden``
2674 ``hidden``
2670 Whether to hide the repository in the hgwebdir index.
2675 Whether to hide the repository in the hgwebdir index.
2671 (default: False)
2676 (default: False)
2672
2677
2673 ``ipv6``
2678 ``ipv6``
2674 Whether to use IPv6. (default: False)
2679 Whether to use IPv6. (default: False)
2675
2680
2676 ``labels``
2681 ``labels``
2677 List of string *labels* associated with the repository.
2682 List of string *labels* associated with the repository.
2678
2683
2679 Labels are exposed as a template keyword and can be used to customize
2684 Labels are exposed as a template keyword and can be used to customize
2680 output. e.g. the ``index`` template can group or filter repositories
2685 output. e.g. the ``index`` template can group or filter repositories
2681 by labels and the ``summary`` template can display additional content
2686 by labels and the ``summary`` template can display additional content
2682 if a specific label is present.
2687 if a specific label is present.
2683
2688
2684 ``logoimg``
2689 ``logoimg``
2685 File name of the logo image that some templates display on each page.
2690 File name of the logo image that some templates display on each page.
2686 The file name is relative to ``staticurl``. That is, the full path to
2691 The file name is relative to ``staticurl``. That is, the full path to
2687 the logo image is "staticurl/logoimg".
2692 the logo image is "staticurl/logoimg".
2688 If unset, ``hglogo.png`` will be used.
2693 If unset, ``hglogo.png`` will be used.
2689
2694
2690 ``logourl``
2695 ``logourl``
2691 Base URL to use for logos. If unset, ``https://mercurial-scm.org/``
2696 Base URL to use for logos. If unset, ``https://mercurial-scm.org/``
2692 will be used.
2697 will be used.
2693
2698
2694 ``maxchanges``
2699 ``maxchanges``
2695 Maximum number of changes to list on the changelog. (default: 10)
2700 Maximum number of changes to list on the changelog. (default: 10)
2696
2701
2697 ``maxfiles``
2702 ``maxfiles``
2698 Maximum number of files to list per changeset. (default: 10)
2703 Maximum number of files to list per changeset. (default: 10)
2699
2704
2700 ``maxshortchanges``
2705 ``maxshortchanges``
2701 Maximum number of changes to list on the shortlog, graph or filelog
2706 Maximum number of changes to list on the shortlog, graph or filelog
2702 pages. (default: 60)
2707 pages. (default: 60)
2703
2708
2704 ``name``
2709 ``name``
2705 Repository name to use in the web interface.
2710 Repository name to use in the web interface.
2706 (default: current working directory)
2711 (default: current working directory)
2707
2712
2708 ``port``
2713 ``port``
2709 Port to listen on. (default: 8000)
2714 Port to listen on. (default: 8000)
2710
2715
2711 ``prefix``
2716 ``prefix``
2712 Prefix path to serve from. (default: '' (server root))
2717 Prefix path to serve from. (default: '' (server root))
2713
2718
2714 ``push_ssl``
2719 ``push_ssl``
2715 Whether to require that inbound pushes be transported over SSL to
2720 Whether to require that inbound pushes be transported over SSL to
2716 prevent password sniffing. (default: True)
2721 prevent password sniffing. (default: True)
2717
2722
2718 ``refreshinterval``
2723 ``refreshinterval``
2719 How frequently directory listings re-scan the filesystem for new
2724 How frequently directory listings re-scan the filesystem for new
2720 repositories, in seconds. This is relevant when wildcards are used
2725 repositories, in seconds. This is relevant when wildcards are used
2721 to define paths. Depending on how much filesystem traversal is
2726 to define paths. Depending on how much filesystem traversal is
2722 required, refreshing may negatively impact performance.
2727 required, refreshing may negatively impact performance.
2723
2728
2724 Values less than or equal to 0 always refresh.
2729 Values less than or equal to 0 always refresh.
2725 (default: 20)
2730 (default: 20)
2726
2731
2727 ``server-header``
2732 ``server-header``
2728 Value for HTTP ``Server`` response header.
2733 Value for HTTP ``Server`` response header.
2729
2734
2730 ``static``
2735 ``static``
2731 Directory where static files are served from.
2736 Directory where static files are served from.
2732
2737
2733 ``staticurl``
2738 ``staticurl``
2734 Base URL to use for static files. If unset, static files (e.g. the
2739 Base URL to use for static files. If unset, static files (e.g. the
2735 hgicon.png favicon) will be served by the CGI script itself. Use
2740 hgicon.png favicon) will be served by the CGI script itself. Use
2736 this setting to serve them directly with the HTTP server.
2741 this setting to serve them directly with the HTTP server.
2737 Example: ``http://hgserver/static/``.
2742 Example: ``http://hgserver/static/``.
2738
2743
2739 ``stripes``
2744 ``stripes``
2740 How many lines a "zebra stripe" should span in multi-line output.
2745 How many lines a "zebra stripe" should span in multi-line output.
2741 Set to 0 to disable. (default: 1)
2746 Set to 0 to disable. (default: 1)
2742
2747
2743 ``style``
2748 ``style``
2744 Which template map style to use. The available options are the names of
2749 Which template map style to use. The available options are the names of
2745 subdirectories in the HTML templates path. (default: ``paper``)
2750 subdirectories in the HTML templates path. (default: ``paper``)
2746 Example: ``monoblue``.
2751 Example: ``monoblue``.
2747
2752
2748 ``templates``
2753 ``templates``
2749 Where to find the HTML templates. The default path to the HTML templates
2754 Where to find the HTML templates. The default path to the HTML templates
2750 can be obtained from ``hg debuginstall``.
2755 can be obtained from ``hg debuginstall``.
2751
2756
2752 ``websub``
2757 ``websub``
2753 ----------
2758 ----------
2754
2759
2755 Web substitution filter definition. You can use this section to
2760 Web substitution filter definition. You can use this section to
2756 define a set of regular expression substitution patterns which
2761 define a set of regular expression substitution patterns which
2757 let you automatically modify the hgweb server output.
2762 let you automatically modify the hgweb server output.
2758
2763
2759 The default hgweb templates only apply these substitution patterns
2764 The default hgweb templates only apply these substitution patterns
2760 on the revision description fields. You can apply them anywhere
2765 on the revision description fields. You can apply them anywhere
2761 you want when you create your own templates by adding calls to the
2766 you want when you create your own templates by adding calls to the
2762 "websub" filter (usually after calling the "escape" filter).
2767 "websub" filter (usually after calling the "escape" filter).
2763
2768
2764 This can be used, for example, to convert issue references to links
2769 This can be used, for example, to convert issue references to links
2765 to your issue tracker, or to convert "markdown-like" syntax into
2770 to your issue tracker, or to convert "markdown-like" syntax into
2766 HTML (see the examples below).
2771 HTML (see the examples below).
2767
2772
2768 Each entry in this section names a substitution filter.
2773 Each entry in this section names a substitution filter.
2769 The value of each entry defines the substitution expression itself.
2774 The value of each entry defines the substitution expression itself.
2770 The websub expressions follow the old interhg extension syntax,
2775 The websub expressions follow the old interhg extension syntax,
2771 which in turn imitates the Unix sed replacement syntax::
2776 which in turn imitates the Unix sed replacement syntax::
2772
2777
2773 patternname = s/SEARCH_REGEX/REPLACE_EXPRESSION/[i]
2778 patternname = s/SEARCH_REGEX/REPLACE_EXPRESSION/[i]
2774
2779
2775 You can use any separator other than "/". The final "i" is optional
2780 You can use any separator other than "/". The final "i" is optional
2776 and indicates that the search must be case insensitive.
2781 and indicates that the search must be case insensitive.
2777
2782
2778 Examples::
2783 Examples::
2779
2784
2780 [websub]
2785 [websub]
2781 issues = s|issue(\d+)|<a href="http://bts.example.org/issue\1">issue\1</a>|i
2786 issues = s|issue(\d+)|<a href="http://bts.example.org/issue\1">issue\1</a>|i
2782 italic = s/\b_(\S+)_\b/<i>\1<\/i>/
2787 italic = s/\b_(\S+)_\b/<i>\1<\/i>/
2783 bold = s/\*\b(\S+)\b\*/<b>\1<\/b>/
2788 bold = s/\*\b(\S+)\b\*/<b>\1<\/b>/
2784
2789
2785 ``worker``
2790 ``worker``
2786 ----------
2791 ----------
2787
2792
2788 Parallel master/worker configuration. We currently perform working
2793 Parallel master/worker configuration. We currently perform working
2789 directory updates in parallel on Unix-like systems, which greatly
2794 directory updates in parallel on Unix-like systems, which greatly
2790 helps performance.
2795 helps performance.
2791
2796
2792 ``enabled``
2797 ``enabled``
2793 Whether to enable workers code to be used.
2798 Whether to enable workers code to be used.
2794 (default: true)
2799 (default: true)
2795
2800
2796 ``numcpus``
2801 ``numcpus``
2797 Number of CPUs to use for parallel operations. A zero or
2802 Number of CPUs to use for parallel operations. A zero or
2798 negative value is treated as ``use the default``.
2803 negative value is treated as ``use the default``.
2799 (default: 4 or the number of CPUs on the system, whichever is larger)
2804 (default: 4 or the number of CPUs on the system, whichever is larger)
2800
2805
2801 ``backgroundclose``
2806 ``backgroundclose``
2802 Whether to enable closing file handles on background threads during certain
2807 Whether to enable closing file handles on background threads during certain
2803 operations. Some platforms aren't very efficient at closing file
2808 operations. Some platforms aren't very efficient at closing file
2804 handles that have been written or appended to. By performing file closing
2809 handles that have been written or appended to. By performing file closing
2805 on background threads, file write rate can increase substantially.
2810 on background threads, file write rate can increase substantially.
2806 (default: true on Windows, false elsewhere)
2811 (default: true on Windows, false elsewhere)
2807
2812
2808 ``backgroundcloseminfilecount``
2813 ``backgroundcloseminfilecount``
2809 Minimum number of files required to trigger background file closing.
2814 Minimum number of files required to trigger background file closing.
2810 Operations not writing this many files won't start background close
2815 Operations not writing this many files won't start background close
2811 threads.
2816 threads.
2812 (default: 2048)
2817 (default: 2048)
2813
2818
2814 ``backgroundclosemaxqueue``
2819 ``backgroundclosemaxqueue``
2815 The maximum number of opened file handles waiting to be closed in the
2820 The maximum number of opened file handles waiting to be closed in the
2816 background. This option only has an effect if ``backgroundclose`` is
2821 background. This option only has an effect if ``backgroundclose`` is
2817 enabled.
2822 enabled.
2818 (default: 384)
2823 (default: 384)
2819
2824
2820 ``backgroundclosethreadcount``
2825 ``backgroundclosethreadcount``
2821 Number of threads to process background file closes. Only relevant if
2826 Number of threads to process background file closes. Only relevant if
2822 ``backgroundclose`` is enabled.
2827 ``backgroundclose`` is enabled.
2823 (default: 4)
2828 (default: 4)
@@ -1,3092 +1,3098 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 )
25 )
26 from . import (
26 from . import (
27 bookmarks,
27 bookmarks,
28 branchmap,
28 branchmap,
29 bundle2,
29 bundle2,
30 changegroup,
30 changegroup,
31 changelog,
31 changelog,
32 color,
32 color,
33 context,
33 context,
34 dirstate,
34 dirstate,
35 dirstateguard,
35 dirstateguard,
36 discovery,
36 discovery,
37 encoding,
37 encoding,
38 error,
38 error,
39 exchange,
39 exchange,
40 extensions,
40 extensions,
41 filelog,
41 filelog,
42 hook,
42 hook,
43 lock as lockmod,
43 lock as lockmod,
44 manifest,
44 manifest,
45 match as matchmod,
45 match as matchmod,
46 merge as mergemod,
46 merge as mergemod,
47 mergeutil,
47 mergeutil,
48 namespaces,
48 namespaces,
49 narrowspec,
49 narrowspec,
50 obsolete,
50 obsolete,
51 pathutil,
51 pathutil,
52 phases,
52 phases,
53 pushkey,
53 pushkey,
54 pycompat,
54 pycompat,
55 repository,
55 repository,
56 repoview,
56 repoview,
57 revset,
57 revset,
58 revsetlang,
58 revsetlang,
59 scmutil,
59 scmutil,
60 sparse,
60 sparse,
61 store as storemod,
61 store as storemod,
62 subrepoutil,
62 subrepoutil,
63 tags as tagsmod,
63 tags as tagsmod,
64 transaction,
64 transaction,
65 txnutil,
65 txnutil,
66 util,
66 util,
67 vfs as vfsmod,
67 vfs as vfsmod,
68 )
68 )
69 from .utils import (
69 from .utils import (
70 interfaceutil,
70 interfaceutil,
71 procutil,
71 procutil,
72 stringutil,
72 stringutil,
73 )
73 )
74
74
75 from .revlogutils import (
75 from .revlogutils import (
76 constants as revlogconst,
76 constants as revlogconst,
77 )
77 )
78
78
79 release = lockmod.release
79 release = lockmod.release
80 urlerr = util.urlerr
80 urlerr = util.urlerr
81 urlreq = util.urlreq
81 urlreq = util.urlreq
82
82
83 # set of (path, vfs-location) tuples. vfs-location is:
83 # set of (path, vfs-location) tuples. vfs-location is:
84 # - 'plain for vfs relative paths
84 # - 'plain for vfs relative paths
85 # - '' for svfs relative paths
85 # - '' for svfs relative paths
86 _cachedfiles = set()
86 _cachedfiles = set()
87
87
88 class _basefilecache(scmutil.filecache):
88 class _basefilecache(scmutil.filecache):
89 """All filecache usage on repo are done for logic that should be unfiltered
89 """All filecache usage on repo are done for logic that should be unfiltered
90 """
90 """
91 def __get__(self, repo, type=None):
91 def __get__(self, repo, type=None):
92 if repo is None:
92 if repo is None:
93 return self
93 return self
94 # proxy to unfiltered __dict__ since filtered repo has no entry
94 # proxy to unfiltered __dict__ since filtered repo has no entry
95 unfi = repo.unfiltered()
95 unfi = repo.unfiltered()
96 try:
96 try:
97 return unfi.__dict__[self.sname]
97 return unfi.__dict__[self.sname]
98 except KeyError:
98 except KeyError:
99 pass
99 pass
100 return super(_basefilecache, self).__get__(unfi, type)
100 return super(_basefilecache, self).__get__(unfi, type)
101
101
102 def set(self, repo, value):
102 def set(self, repo, value):
103 return super(_basefilecache, self).set(repo.unfiltered(), value)
103 return super(_basefilecache, self).set(repo.unfiltered(), value)
104
104
105 class repofilecache(_basefilecache):
105 class repofilecache(_basefilecache):
106 """filecache for files in .hg but outside of .hg/store"""
106 """filecache for files in .hg but outside of .hg/store"""
107 def __init__(self, *paths):
107 def __init__(self, *paths):
108 super(repofilecache, self).__init__(*paths)
108 super(repofilecache, self).__init__(*paths)
109 for path in paths:
109 for path in paths:
110 _cachedfiles.add((path, 'plain'))
110 _cachedfiles.add((path, 'plain'))
111
111
112 def join(self, obj, fname):
112 def join(self, obj, fname):
113 return obj.vfs.join(fname)
113 return obj.vfs.join(fname)
114
114
115 class storecache(_basefilecache):
115 class storecache(_basefilecache):
116 """filecache for files in the store"""
116 """filecache for files in the store"""
117 def __init__(self, *paths):
117 def __init__(self, *paths):
118 super(storecache, self).__init__(*paths)
118 super(storecache, self).__init__(*paths)
119 for path in paths:
119 for path in paths:
120 _cachedfiles.add((path, ''))
120 _cachedfiles.add((path, ''))
121
121
122 def join(self, obj, fname):
122 def join(self, obj, fname):
123 return obj.sjoin(fname)
123 return obj.sjoin(fname)
124
124
125 def isfilecached(repo, name):
125 def isfilecached(repo, name):
126 """check if a repo has already cached "name" filecache-ed property
126 """check if a repo has already cached "name" filecache-ed property
127
127
128 This returns (cachedobj-or-None, iscached) tuple.
128 This returns (cachedobj-or-None, iscached) tuple.
129 """
129 """
130 cacheentry = repo.unfiltered()._filecache.get(name, None)
130 cacheentry = repo.unfiltered()._filecache.get(name, None)
131 if not cacheentry:
131 if not cacheentry:
132 return None, False
132 return None, False
133 return cacheentry.obj, True
133 return cacheentry.obj, True
134
134
135 class unfilteredpropertycache(util.propertycache):
135 class unfilteredpropertycache(util.propertycache):
136 """propertycache that apply to unfiltered repo only"""
136 """propertycache that apply to unfiltered repo only"""
137
137
138 def __get__(self, repo, type=None):
138 def __get__(self, repo, type=None):
139 unfi = repo.unfiltered()
139 unfi = repo.unfiltered()
140 if unfi is repo:
140 if unfi is repo:
141 return super(unfilteredpropertycache, self).__get__(unfi)
141 return super(unfilteredpropertycache, self).__get__(unfi)
142 return getattr(unfi, self.name)
142 return getattr(unfi, self.name)
143
143
144 class filteredpropertycache(util.propertycache):
144 class filteredpropertycache(util.propertycache):
145 """propertycache that must take filtering in account"""
145 """propertycache that must take filtering in account"""
146
146
147 def cachevalue(self, obj, value):
147 def cachevalue(self, obj, value):
148 object.__setattr__(obj, self.name, value)
148 object.__setattr__(obj, self.name, value)
149
149
150
150
151 def hasunfilteredcache(repo, name):
151 def hasunfilteredcache(repo, name):
152 """check if a repo has an unfilteredpropertycache value for <name>"""
152 """check if a repo has an unfilteredpropertycache value for <name>"""
153 return name in vars(repo.unfiltered())
153 return name in vars(repo.unfiltered())
154
154
155 def unfilteredmethod(orig):
155 def unfilteredmethod(orig):
156 """decorate method that always need to be run on unfiltered version"""
156 """decorate method that always need to be run on unfiltered version"""
157 def wrapper(repo, *args, **kwargs):
157 def wrapper(repo, *args, **kwargs):
158 return orig(repo.unfiltered(), *args, **kwargs)
158 return orig(repo.unfiltered(), *args, **kwargs)
159 return wrapper
159 return wrapper
160
160
161 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
161 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
162 'unbundle'}
162 'unbundle'}
163 legacycaps = moderncaps.union({'changegroupsubset'})
163 legacycaps = moderncaps.union({'changegroupsubset'})
164
164
165 @interfaceutil.implementer(repository.ipeercommandexecutor)
165 @interfaceutil.implementer(repository.ipeercommandexecutor)
166 class localcommandexecutor(object):
166 class localcommandexecutor(object):
167 def __init__(self, peer):
167 def __init__(self, peer):
168 self._peer = peer
168 self._peer = peer
169 self._sent = False
169 self._sent = False
170 self._closed = False
170 self._closed = False
171
171
172 def __enter__(self):
172 def __enter__(self):
173 return self
173 return self
174
174
175 def __exit__(self, exctype, excvalue, exctb):
175 def __exit__(self, exctype, excvalue, exctb):
176 self.close()
176 self.close()
177
177
178 def callcommand(self, command, args):
178 def callcommand(self, command, args):
179 if self._sent:
179 if self._sent:
180 raise error.ProgrammingError('callcommand() cannot be used after '
180 raise error.ProgrammingError('callcommand() cannot be used after '
181 'sendcommands()')
181 'sendcommands()')
182
182
183 if self._closed:
183 if self._closed:
184 raise error.ProgrammingError('callcommand() cannot be used after '
184 raise error.ProgrammingError('callcommand() cannot be used after '
185 'close()')
185 'close()')
186
186
187 # We don't need to support anything fancy. Just call the named
187 # We don't need to support anything fancy. Just call the named
188 # method on the peer and return a resolved future.
188 # method on the peer and return a resolved future.
189 fn = getattr(self._peer, pycompat.sysstr(command))
189 fn = getattr(self._peer, pycompat.sysstr(command))
190
190
191 f = pycompat.futures.Future()
191 f = pycompat.futures.Future()
192
192
193 try:
193 try:
194 result = fn(**pycompat.strkwargs(args))
194 result = fn(**pycompat.strkwargs(args))
195 except Exception:
195 except Exception:
196 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
196 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
197 else:
197 else:
198 f.set_result(result)
198 f.set_result(result)
199
199
200 return f
200 return f
201
201
202 def sendcommands(self):
202 def sendcommands(self):
203 self._sent = True
203 self._sent = True
204
204
205 def close(self):
205 def close(self):
206 self._closed = True
206 self._closed = True
207
207
208 @interfaceutil.implementer(repository.ipeercommands)
208 @interfaceutil.implementer(repository.ipeercommands)
209 class localpeer(repository.peer):
209 class localpeer(repository.peer):
210 '''peer for a local repo; reflects only the most recent API'''
210 '''peer for a local repo; reflects only the most recent API'''
211
211
212 def __init__(self, repo, caps=None):
212 def __init__(self, repo, caps=None):
213 super(localpeer, self).__init__()
213 super(localpeer, self).__init__()
214
214
215 if caps is None:
215 if caps is None:
216 caps = moderncaps.copy()
216 caps = moderncaps.copy()
217 self._repo = repo.filtered('served')
217 self._repo = repo.filtered('served')
218 self.ui = repo.ui
218 self.ui = repo.ui
219 self._caps = repo._restrictcapabilities(caps)
219 self._caps = repo._restrictcapabilities(caps)
220
220
221 # Begin of _basepeer interface.
221 # Begin of _basepeer interface.
222
222
223 def url(self):
223 def url(self):
224 return self._repo.url()
224 return self._repo.url()
225
225
226 def local(self):
226 def local(self):
227 return self._repo
227 return self._repo
228
228
229 def peer(self):
229 def peer(self):
230 return self
230 return self
231
231
232 def canpush(self):
232 def canpush(self):
233 return True
233 return True
234
234
235 def close(self):
235 def close(self):
236 self._repo.close()
236 self._repo.close()
237
237
238 # End of _basepeer interface.
238 # End of _basepeer interface.
239
239
240 # Begin of _basewirecommands interface.
240 # Begin of _basewirecommands interface.
241
241
242 def branchmap(self):
242 def branchmap(self):
243 return self._repo.branchmap()
243 return self._repo.branchmap()
244
244
245 def capabilities(self):
245 def capabilities(self):
246 return self._caps
246 return self._caps
247
247
248 def clonebundles(self):
248 def clonebundles(self):
249 return self._repo.tryread('clonebundles.manifest')
249 return self._repo.tryread('clonebundles.manifest')
250
250
251 def debugwireargs(self, one, two, three=None, four=None, five=None):
251 def debugwireargs(self, one, two, three=None, four=None, five=None):
252 """Used to test argument passing over the wire"""
252 """Used to test argument passing over the wire"""
253 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
253 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
254 pycompat.bytestr(four),
254 pycompat.bytestr(four),
255 pycompat.bytestr(five))
255 pycompat.bytestr(five))
256
256
257 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
257 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
258 **kwargs):
258 **kwargs):
259 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
259 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
260 common=common, bundlecaps=bundlecaps,
260 common=common, bundlecaps=bundlecaps,
261 **kwargs)[1]
261 **kwargs)[1]
262 cb = util.chunkbuffer(chunks)
262 cb = util.chunkbuffer(chunks)
263
263
264 if exchange.bundle2requested(bundlecaps):
264 if exchange.bundle2requested(bundlecaps):
265 # When requesting a bundle2, getbundle returns a stream to make the
265 # When requesting a bundle2, getbundle returns a stream to make the
266 # wire level function happier. We need to build a proper object
266 # wire level function happier. We need to build a proper object
267 # from it in local peer.
267 # from it in local peer.
268 return bundle2.getunbundler(self.ui, cb)
268 return bundle2.getunbundler(self.ui, cb)
269 else:
269 else:
270 return changegroup.getunbundler('01', cb, None)
270 return changegroup.getunbundler('01', cb, None)
271
271
272 def heads(self):
272 def heads(self):
273 return self._repo.heads()
273 return self._repo.heads()
274
274
275 def known(self, nodes):
275 def known(self, nodes):
276 return self._repo.known(nodes)
276 return self._repo.known(nodes)
277
277
278 def listkeys(self, namespace):
278 def listkeys(self, namespace):
279 return self._repo.listkeys(namespace)
279 return self._repo.listkeys(namespace)
280
280
281 def lookup(self, key):
281 def lookup(self, key):
282 return self._repo.lookup(key)
282 return self._repo.lookup(key)
283
283
284 def pushkey(self, namespace, key, old, new):
284 def pushkey(self, namespace, key, old, new):
285 return self._repo.pushkey(namespace, key, old, new)
285 return self._repo.pushkey(namespace, key, old, new)
286
286
287 def stream_out(self):
287 def stream_out(self):
288 raise error.Abort(_('cannot perform stream clone against local '
288 raise error.Abort(_('cannot perform stream clone against local '
289 'peer'))
289 'peer'))
290
290
291 def unbundle(self, bundle, heads, url):
291 def unbundle(self, bundle, heads, url):
292 """apply a bundle on a repo
292 """apply a bundle on a repo
293
293
294 This function handles the repo locking itself."""
294 This function handles the repo locking itself."""
295 try:
295 try:
296 try:
296 try:
297 bundle = exchange.readbundle(self.ui, bundle, None)
297 bundle = exchange.readbundle(self.ui, bundle, None)
298 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
298 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
299 if util.safehasattr(ret, 'getchunks'):
299 if util.safehasattr(ret, 'getchunks'):
300 # This is a bundle20 object, turn it into an unbundler.
300 # This is a bundle20 object, turn it into an unbundler.
301 # This little dance should be dropped eventually when the
301 # This little dance should be dropped eventually when the
302 # API is finally improved.
302 # API is finally improved.
303 stream = util.chunkbuffer(ret.getchunks())
303 stream = util.chunkbuffer(ret.getchunks())
304 ret = bundle2.getunbundler(self.ui, stream)
304 ret = bundle2.getunbundler(self.ui, stream)
305 return ret
305 return ret
306 except Exception as exc:
306 except Exception as exc:
307 # If the exception contains output salvaged from a bundle2
307 # If the exception contains output salvaged from a bundle2
308 # reply, we need to make sure it is printed before continuing
308 # reply, we need to make sure it is printed before continuing
309 # to fail. So we build a bundle2 with such output and consume
309 # to fail. So we build a bundle2 with such output and consume
310 # it directly.
310 # it directly.
311 #
311 #
312 # This is not very elegant but allows a "simple" solution for
312 # This is not very elegant but allows a "simple" solution for
313 # issue4594
313 # issue4594
314 output = getattr(exc, '_bundle2salvagedoutput', ())
314 output = getattr(exc, '_bundle2salvagedoutput', ())
315 if output:
315 if output:
316 bundler = bundle2.bundle20(self._repo.ui)
316 bundler = bundle2.bundle20(self._repo.ui)
317 for out in output:
317 for out in output:
318 bundler.addpart(out)
318 bundler.addpart(out)
319 stream = util.chunkbuffer(bundler.getchunks())
319 stream = util.chunkbuffer(bundler.getchunks())
320 b = bundle2.getunbundler(self.ui, stream)
320 b = bundle2.getunbundler(self.ui, stream)
321 bundle2.processbundle(self._repo, b)
321 bundle2.processbundle(self._repo, b)
322 raise
322 raise
323 except error.PushRaced as exc:
323 except error.PushRaced as exc:
324 raise error.ResponseError(_('push failed:'),
324 raise error.ResponseError(_('push failed:'),
325 stringutil.forcebytestr(exc))
325 stringutil.forcebytestr(exc))
326
326
327 # End of _basewirecommands interface.
327 # End of _basewirecommands interface.
328
328
329 # Begin of peer interface.
329 # Begin of peer interface.
330
330
331 def commandexecutor(self):
331 def commandexecutor(self):
332 return localcommandexecutor(self)
332 return localcommandexecutor(self)
333
333
334 # End of peer interface.
334 # End of peer interface.
335
335
336 @interfaceutil.implementer(repository.ipeerlegacycommands)
336 @interfaceutil.implementer(repository.ipeerlegacycommands)
337 class locallegacypeer(localpeer):
337 class locallegacypeer(localpeer):
338 '''peer extension which implements legacy methods too; used for tests with
338 '''peer extension which implements legacy methods too; used for tests with
339 restricted capabilities'''
339 restricted capabilities'''
340
340
341 def __init__(self, repo):
341 def __init__(self, repo):
342 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
342 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
343
343
344 # Begin of baselegacywirecommands interface.
344 # Begin of baselegacywirecommands interface.
345
345
346 def between(self, pairs):
346 def between(self, pairs):
347 return self._repo.between(pairs)
347 return self._repo.between(pairs)
348
348
349 def branches(self, nodes):
349 def branches(self, nodes):
350 return self._repo.branches(nodes)
350 return self._repo.branches(nodes)
351
351
352 def changegroup(self, nodes, source):
352 def changegroup(self, nodes, source):
353 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
353 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
354 missingheads=self._repo.heads())
354 missingheads=self._repo.heads())
355 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
355 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
356
356
357 def changegroupsubset(self, bases, heads, source):
357 def changegroupsubset(self, bases, heads, source):
358 outgoing = discovery.outgoing(self._repo, missingroots=bases,
358 outgoing = discovery.outgoing(self._repo, missingroots=bases,
359 missingheads=heads)
359 missingheads=heads)
360 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
360 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
361
361
362 # End of baselegacywirecommands interface.
362 # End of baselegacywirecommands interface.
363
363
364 # Increment the sub-version when the revlog v2 format changes to lock out old
364 # Increment the sub-version when the revlog v2 format changes to lock out old
365 # clients.
365 # clients.
366 REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'
366 REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'
367
367
368 # A repository with the sparserevlog feature will have delta chains that
368 # A repository with the sparserevlog feature will have delta chains that
369 # can spread over a larger span. Sparse reading cuts these large spans into
369 # can spread over a larger span. Sparse reading cuts these large spans into
370 # pieces, so that each piece isn't too big.
370 # pieces, so that each piece isn't too big.
371 # Without the sparserevlog capability, reading from the repository could use
371 # Without the sparserevlog capability, reading from the repository could use
372 # huge amounts of memory, because the whole span would be read at once,
372 # huge amounts of memory, because the whole span would be read at once,
373 # including all the intermediate revisions that aren't pertinent for the chain.
373 # including all the intermediate revisions that aren't pertinent for the chain.
374 # This is why once a repository has enabled sparse-read, it becomes required.
374 # This is why once a repository has enabled sparse-read, it becomes required.
375 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
375 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
376
376
377 # Functions receiving (ui, features) that extensions can register to impact
377 # Functions receiving (ui, features) that extensions can register to impact
378 # the ability to load repositories with custom requirements. Only
378 # the ability to load repositories with custom requirements. Only
379 # functions defined in loaded extensions are called.
379 # functions defined in loaded extensions are called.
380 #
380 #
381 # The function receives a set of requirement strings that the repository
381 # The function receives a set of requirement strings that the repository
382 # is capable of opening. Functions will typically add elements to the
382 # is capable of opening. Functions will typically add elements to the
383 # set to reflect that the extension knows how to handle that requirements.
383 # set to reflect that the extension knows how to handle that requirements.
384 featuresetupfuncs = set()
384 featuresetupfuncs = set()
385
385
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            # A stat failure other than "does not exist" (e.g. EACCES)
            # is propagated as-is rather than masked as a missing repo.
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The wcache vfs is used to manage cache files related to the working copy.
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   wcachevfs=wcachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents)
586
586
def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    hgrcpath = hgvfs.join(b'hgrc')
    try:
        ui.readconfig(hgrcpath, root=wdirvfs.base)
    except IOError:
        # Typically the file simply doesn't exist; nothing was loaded.
        return False
    return True
604
604
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Requirements that imply extensions which must be enabled automatically
    # when the requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, extnames in sorted(autoextensions.items()):
        if requirement in requirements:
            for extname in extnames:
                # Respect an explicit user config for the extension.
                if not ui.hasconfig(b'extensions', extname):
                    ui.setconfig(b'extensions', extname, b'',
                                 source='autoload')
629
629
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Requirements natively understood by this version of the code.
    supported = set(localrepository._basesupported)

    # ``featuresetupfuncs`` entries registered by extensions are only honored
    # when the registering extension is actually loaded for this ui instance.
    loadedmodules = {mod.__name__ for _name, mod in extensions.extensions(ui)}
    for setupfunc in featuresetupfuncs:
        if setupfunc.__module__ in loadedmodules:
            setupfunc(ui, supported)

    # Each compression engine that defines a revlog header implies a
    # derived requirement of its own.
    for enginename in util.compengines:
        if util.compengines[enginename].revlogheader():
            supported.add(b'exp-compression-%s' % enginename)

    return supported
650
650
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements and raises ``error.RequirementError``
    if any requirement in that set is unknown to the currently loaded
    code, or if the set looks like it came from a corrupt requires file.
    """
    unknown = set()

    for requirement in requirements:
        if requirement not in supported:
            # An empty entry or one not starting with an alphanumeric byte
            # cannot come from a well-formed .hg/requires file.
            if not requirement or not requirement[0:1].isalnum():
                raise error.RequirementError(
                    _(b'.hg/requires file is corrupt'))
            unknown.add(requirement)

    if unknown:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(unknown)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))
677
677
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' not in requirements:
        return

    # The repo requires sparse support, which must be provided by the
    # (optional) sparse extension.
    if not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extensions to access'))
695
695
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository.

    The requirements present select among the basic, encoded and
    fncache store layouts.
    """
    if b'store' not in requirements:
        # Ancient layout: files live directly under .hg/.
        return storemod.basicstore(path, vfstype)

    if b'fncache' in requirements:
        return storemod.fncachestore(path, vfstype,
                                     b'dotencode' in requirements)

    return storemod.encodedstore(path, vfstype)
706
706
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    cachesize = ui.configint(b'format', b'manifestcachesize')
    if cachesize is not None:
        options[b'manifestcachesize'] = cachesize

    # Without a revlog-related requirement saying otherwise, assume the
    # ancient revlog version 0 format. That format is so old that no
    # opener options would do anything meaningful, so revlog options are
    # only parsed for modern revlog requirements.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements,
                                                    features))

    return options
731
731
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs.

    Consults ``requirements`` and various config options on ``ui`` and
    returns a dict of options consumed by the revlog storage layer.

    Raises ``error.Abort`` if ``storage.revlog.zlib.level`` is set to a
    value outside zlib's valid 0-9 range.
    """

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(b'storage',
                                      b'revlog.reuse-external-delta-parent')
    if lazydeltabase is None:
        # Unset config: derive the default from the general-delta config.
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        # Sparse revlogs only make sense on top of general delta.
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        if r.startswith(b'exp-compression-'):
            options[b'compengine'] = r[len(b'exp-compression-'):]

    # zlib compression level used when writing revlog data. When set, it
    # must fall inside zlib's valid 0-9 range.
    zliblevel = ui.configint(b'storage', b'revlog.zlib.level')
    options[b'zlib.level'] = zliblevel
    if zliblevel is not None and not (0 <= zliblevel <= 9):
        # Fix: use a bytes literal for the translatable message, matching
        # every other ``_(b'...')`` call in this module (a unicode literal
        # here broke the file's bytes-message convention).
        msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
        raise error.Abort(msg % zliblevel)

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options
804
810
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``.

    All keyword arguments are accepted for interface compatibility with
    the other repo-type factory functions but are not consulted; the
    stock ``localrepository`` class is always returned.
    """
    return localrepository
808
814
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        """Obtain a filelog for ``path``, stripping any leading slash."""
        # Fix: ``path[0] == b'/'`` is a py2-ism. On Python 3 indexing
        # bytes yields an int, so that comparison is always False and the
        # leading slash was never stripped. startswith() behaves the same
        # on both Python 2 and Python 3.
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)
818
824
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        """Obtain a narrow filelog for ``path``, stripping any leading slash."""
        # Fix: ``path[0] == b'/'`` is a py2-ism. On Python 3 indexing
        # bytes yields an int, so that comparison is always False and the
        # leading slash was never stripped. startswith() behaves the same
        # on both Python 2 and Python 3.
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
828
834
def makefilestorage(requirements, features, **kwargs):
    """Return the type providing ``ilocalrepositoryfilestorage``.

    Also records the revlog-backed storage capabilities on ``features``.
    """
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    # Narrow repositories need the narrow-aware filelog wrapper.
    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    return revlogfilestorage
838
844
# Ordered list of (interface, factory) pairs consulted by
# ``makelocalrepository()`` when iteratively deriving the concrete type
# for a local repository instance. Each factory is wrapped in a lambda so
# that this list does not hold a direct reference, allowing the
# module-level functions to be wrapped by extensions.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
848
854
849 @interfaceutil.implementer(repository.ilocalrepositorymain)
855 @interfaceutil.implementer(repository.ilocalrepositorymain)
850 class localrepository(object):
856 class localrepository(object):
851 """Main class for representing local repositories.
857 """Main class for representing local repositories.
852
858
853 All local repositories are instances of this class.
859 All local repositories are instances of this class.
854
860
855 Constructed on its own, instances of this class are not usable as
861 Constructed on its own, instances of this class are not usable as
856 repository objects. To obtain a usable repository object, call
862 repository objects. To obtain a usable repository object, call
857 ``hg.repository()``, ``localrepo.instance()``, or
863 ``hg.repository()``, ``localrepo.instance()``, or
858 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
864 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
859 ``instance()`` adds support for creating new repositories.
865 ``instance()`` adds support for creating new repositories.
860 ``hg.repository()`` adds more extension integration, including calling
866 ``hg.repository()`` adds more extension integration, including calling
861 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
867 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
862 used.
868 used.
863 """
869 """
864
870
865 # obsolete experimental requirements:
871 # obsolete experimental requirements:
866 # - manifestv2: An experimental new manifest format that allowed
872 # - manifestv2: An experimental new manifest format that allowed
867 # for stem compression of long paths. Experiment ended up not
873 # for stem compression of long paths. Experiment ended up not
868 # being successful (repository sizes went up due to worse delta
874 # being successful (repository sizes went up due to worse delta
869 # chains), and the code was deleted in 4.6.
875 # chains), and the code was deleted in 4.6.
870 supportedformats = {
876 supportedformats = {
871 'revlogv1',
877 'revlogv1',
872 'generaldelta',
878 'generaldelta',
873 'treemanifest',
879 'treemanifest',
874 REVLOGV2_REQUIREMENT,
880 REVLOGV2_REQUIREMENT,
875 SPARSEREVLOG_REQUIREMENT,
881 SPARSEREVLOG_REQUIREMENT,
876 }
882 }
877 _basesupported = supportedformats | {
883 _basesupported = supportedformats | {
878 'store',
884 'store',
879 'fncache',
885 'fncache',
880 'shared',
886 'shared',
881 'relshared',
887 'relshared',
882 'dotencode',
888 'dotencode',
883 'exp-sparse',
889 'exp-sparse',
884 'internal-phase'
890 'internal-phase'
885 }
891 }
886
892
887 # list of prefix for file which can be written without 'wlock'
893 # list of prefix for file which can be written without 'wlock'
888 # Extensions should extend this list when needed
894 # Extensions should extend this list when needed
889 _wlockfreeprefix = {
895 _wlockfreeprefix = {
890 # We might consider requiring 'wlock' for the next
896 # We might consider requiring 'wlock' for the next
891 # two, but pretty much all the existing code assume
897 # two, but pretty much all the existing code assume
892 # wlock is not needed so we keep them excluded for
898 # wlock is not needed so we keep them excluded for
893 # now.
899 # now.
894 'hgrc',
900 'hgrc',
895 'requires',
901 'requires',
896 # XXX cache is a complicated business someone
902 # XXX cache is a complicated business someone
897 # should investigate this in depth at some point
903 # should investigate this in depth at some point
898 'cache/',
904 'cache/',
899 # XXX shouldn't be dirstate covered by the wlock?
905 # XXX shouldn't be dirstate covered by the wlock?
900 'dirstate',
906 'dirstate',
901 # XXX bisect was still a bit too messy at the time
907 # XXX bisect was still a bit too messy at the time
902 # this changeset was introduced. Someone should fix
908 # this changeset was introduced. Someone should fix
903 # the remaining bit and drop this line
909 # the remaining bit and drop this line
904 'bisect.state',
910 'bisect.state',
905 }
911 }
906
912
907 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
913 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
908 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
914 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
909 features, intents=None):
915 features, intents=None):
910 """Create a new local repository instance.
916 """Create a new local repository instance.
911
917
912 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
918 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
913 or ``localrepo.makelocalrepository()`` for obtaining a new repository
919 or ``localrepo.makelocalrepository()`` for obtaining a new repository
914 object.
920 object.
915
921
916 Arguments:
922 Arguments:
917
923
918 baseui
924 baseui
919 ``ui.ui`` instance that ``ui`` argument was based off of.
925 ``ui.ui`` instance that ``ui`` argument was based off of.
920
926
921 ui
927 ui
922 ``ui.ui`` instance for use by the repository.
928 ``ui.ui`` instance for use by the repository.
923
929
924 origroot
930 origroot
925 ``bytes`` path to working directory root of this repository.
931 ``bytes`` path to working directory root of this repository.
926
932
927 wdirvfs
933 wdirvfs
928 ``vfs.vfs`` rooted at the working directory.
934 ``vfs.vfs`` rooted at the working directory.
929
935
930 hgvfs
936 hgvfs
931 ``vfs.vfs`` rooted at .hg/
937 ``vfs.vfs`` rooted at .hg/
932
938
933 requirements
939 requirements
934 ``set`` of bytestrings representing repository opening requirements.
940 ``set`` of bytestrings representing repository opening requirements.
935
941
936 supportedrequirements
942 supportedrequirements
937 ``set`` of bytestrings representing repository requirements that we
943 ``set`` of bytestrings representing repository requirements that we
938 know how to open. May be a supetset of ``requirements``.
944 know how to open. May be a supetset of ``requirements``.
939
945
940 sharedpath
946 sharedpath
941 ``bytes`` Defining path to storage base directory. Points to a
947 ``bytes`` Defining path to storage base directory. Points to a
942 ``.hg/`` directory somewhere.
948 ``.hg/`` directory somewhere.
943
949
944 store
950 store
945 ``store.basicstore`` (or derived) instance providing access to
951 ``store.basicstore`` (or derived) instance providing access to
946 versioned storage.
952 versioned storage.
947
953
948 cachevfs
954 cachevfs
949 ``vfs.vfs`` used for cache files.
955 ``vfs.vfs`` used for cache files.
950
956
951 wcachevfs
957 wcachevfs
952 ``vfs.vfs`` used for cache files related to the working copy.
958 ``vfs.vfs`` used for cache files related to the working copy.
953
959
954 features
960 features
955 ``set`` of bytestrings defining features/capabilities of this
961 ``set`` of bytestrings defining features/capabilities of this
956 instance.
962 instance.
957
963
958 intents
964 intents
959 ``set`` of system strings indicating what this repo will be used
965 ``set`` of system strings indicating what this repo will be used
960 for.
966 for.
961 """
967 """
962 self.baseui = baseui
968 self.baseui = baseui
963 self.ui = ui
969 self.ui = ui
964 self.origroot = origroot
970 self.origroot = origroot
965 # vfs rooted at working directory.
971 # vfs rooted at working directory.
966 self.wvfs = wdirvfs
972 self.wvfs = wdirvfs
967 self.root = wdirvfs.base
973 self.root = wdirvfs.base
968 # vfs rooted at .hg/. Used to access most non-store paths.
974 # vfs rooted at .hg/. Used to access most non-store paths.
969 self.vfs = hgvfs
975 self.vfs = hgvfs
970 self.path = hgvfs.base
976 self.path = hgvfs.base
971 self.requirements = requirements
977 self.requirements = requirements
972 self.supported = supportedrequirements
978 self.supported = supportedrequirements
973 self.sharedpath = sharedpath
979 self.sharedpath = sharedpath
974 self.store = store
980 self.store = store
975 self.cachevfs = cachevfs
981 self.cachevfs = cachevfs
976 self.wcachevfs = wcachevfs
982 self.wcachevfs = wcachevfs
977 self.features = features
983 self.features = features
978
984
979 self.filtername = None
985 self.filtername = None
980
986
981 if (self.ui.configbool('devel', 'all-warnings') or
987 if (self.ui.configbool('devel', 'all-warnings') or
982 self.ui.configbool('devel', 'check-locks')):
988 self.ui.configbool('devel', 'check-locks')):
983 self.vfs.audit = self._getvfsward(self.vfs.audit)
989 self.vfs.audit = self._getvfsward(self.vfs.audit)
984 # A list of callback to shape the phase if no data were found.
990 # A list of callback to shape the phase if no data were found.
985 # Callback are in the form: func(repo, roots) --> processed root.
991 # Callback are in the form: func(repo, roots) --> processed root.
986 # This list it to be filled by extension during repo setup
992 # This list it to be filled by extension during repo setup
987 self._phasedefaults = []
993 self._phasedefaults = []
988
994
989 color.setup(self.ui)
995 color.setup(self.ui)
990
996
991 self.spath = self.store.path
997 self.spath = self.store.path
992 self.svfs = self.store.vfs
998 self.svfs = self.store.vfs
993 self.sjoin = self.store.join
999 self.sjoin = self.store.join
994 if (self.ui.configbool('devel', 'all-warnings') or
1000 if (self.ui.configbool('devel', 'all-warnings') or
995 self.ui.configbool('devel', 'check-locks')):
1001 self.ui.configbool('devel', 'check-locks')):
996 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
1002 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
997 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1003 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
998 else: # standard vfs
1004 else: # standard vfs
999 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1005 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1000
1006
1001 self._dirstatevalidatewarned = False
1007 self._dirstatevalidatewarned = False
1002
1008
1003 self._branchcaches = branchmap.BranchMapCache()
1009 self._branchcaches = branchmap.BranchMapCache()
1004 self._revbranchcache = None
1010 self._revbranchcache = None
1005 self._filterpats = {}
1011 self._filterpats = {}
1006 self._datafilters = {}
1012 self._datafilters = {}
1007 self._transref = self._lockref = self._wlockref = None
1013 self._transref = self._lockref = self._wlockref = None
1008
1014
1009 # A cache for various files under .hg/ that tracks file changes,
1015 # A cache for various files under .hg/ that tracks file changes,
1010 # (used by the filecache decorator)
1016 # (used by the filecache decorator)
1011 #
1017 #
1012 # Maps a property name to its util.filecacheentry
1018 # Maps a property name to its util.filecacheentry
1013 self._filecache = {}
1019 self._filecache = {}
1014
1020
1015 # hold sets of revision to be filtered
1021 # hold sets of revision to be filtered
1016 # should be cleared when something might have changed the filter value:
1022 # should be cleared when something might have changed the filter value:
1017 # - new changesets,
1023 # - new changesets,
1018 # - phase change,
1024 # - phase change,
1019 # - new obsolescence marker,
1025 # - new obsolescence marker,
1020 # - working directory parent change,
1026 # - working directory parent change,
1021 # - bookmark changes
1027 # - bookmark changes
1022 self.filteredrevcache = {}
1028 self.filteredrevcache = {}
1023
1029
1024 # post-dirstate-status hooks
1030 # post-dirstate-status hooks
1025 self._postdsstatus = []
1031 self._postdsstatus = []
1026
1032
1027 # generic mapping between names and nodes
1033 # generic mapping between names and nodes
1028 self.names = namespaces.namespaces()
1034 self.names = namespaces.namespaces()
1029
1035
1030 # Key to signature value.
1036 # Key to signature value.
1031 self._sparsesignaturecache = {}
1037 self._sparsesignaturecache = {}
1032 # Signature to cached matcher instance.
1038 # Signature to cached matcher instance.
1033 self._sparsematchercache = {}
1039 self._sparsematchercache = {}
1034
1040
1035 def _getvfsward(self, origfunc):
1041 def _getvfsward(self, origfunc):
1036 """build a ward for self.vfs"""
1042 """build a ward for self.vfs"""
1037 rref = weakref.ref(self)
1043 rref = weakref.ref(self)
1038 def checkvfs(path, mode=None):
1044 def checkvfs(path, mode=None):
1039 ret = origfunc(path, mode=mode)
1045 ret = origfunc(path, mode=mode)
1040 repo = rref()
1046 repo = rref()
1041 if (repo is None
1047 if (repo is None
1042 or not util.safehasattr(repo, '_wlockref')
1048 or not util.safehasattr(repo, '_wlockref')
1043 or not util.safehasattr(repo, '_lockref')):
1049 or not util.safehasattr(repo, '_lockref')):
1044 return
1050 return
1045 if mode in (None, 'r', 'rb'):
1051 if mode in (None, 'r', 'rb'):
1046 return
1052 return
1047 if path.startswith(repo.path):
1053 if path.startswith(repo.path):
1048 # truncate name relative to the repository (.hg)
1054 # truncate name relative to the repository (.hg)
1049 path = path[len(repo.path) + 1:]
1055 path = path[len(repo.path) + 1:]
1050 if path.startswith('cache/'):
1056 if path.startswith('cache/'):
1051 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1057 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1052 repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
1058 repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
1053 if path.startswith('journal.') or path.startswith('undo.'):
1059 if path.startswith('journal.') or path.startswith('undo.'):
1054 # journal is covered by 'lock'
1060 # journal is covered by 'lock'
1055 if repo._currentlock(repo._lockref) is None:
1061 if repo._currentlock(repo._lockref) is None:
1056 repo.ui.develwarn('write with no lock: "%s"' % path,
1062 repo.ui.develwarn('write with no lock: "%s"' % path,
1057 stacklevel=3, config='check-locks')
1063 stacklevel=3, config='check-locks')
1058 elif repo._currentlock(repo._wlockref) is None:
1064 elif repo._currentlock(repo._wlockref) is None:
1059 # rest of vfs files are covered by 'wlock'
1065 # rest of vfs files are covered by 'wlock'
1060 #
1066 #
1061 # exclude special files
1067 # exclude special files
1062 for prefix in self._wlockfreeprefix:
1068 for prefix in self._wlockfreeprefix:
1063 if path.startswith(prefix):
1069 if path.startswith(prefix):
1064 return
1070 return
1065 repo.ui.develwarn('write with no wlock: "%s"' % path,
1071 repo.ui.develwarn('write with no wlock: "%s"' % path,
1066 stacklevel=3, config='check-locks')
1072 stacklevel=3, config='check-locks')
1067 return ret
1073 return ret
1068 return checkvfs
1074 return checkvfs
1069
1075
1070 def _getsvfsward(self, origfunc):
1076 def _getsvfsward(self, origfunc):
1071 """build a ward for self.svfs"""
1077 """build a ward for self.svfs"""
1072 rref = weakref.ref(self)
1078 rref = weakref.ref(self)
1073 def checksvfs(path, mode=None):
1079 def checksvfs(path, mode=None):
1074 ret = origfunc(path, mode=mode)
1080 ret = origfunc(path, mode=mode)
1075 repo = rref()
1081 repo = rref()
1076 if repo is None or not util.safehasattr(repo, '_lockref'):
1082 if repo is None or not util.safehasattr(repo, '_lockref'):
1077 return
1083 return
1078 if mode in (None, 'r', 'rb'):
1084 if mode in (None, 'r', 'rb'):
1079 return
1085 return
1080 if path.startswith(repo.sharedpath):
1086 if path.startswith(repo.sharedpath):
1081 # truncate name relative to the repository (.hg)
1087 # truncate name relative to the repository (.hg)
1082 path = path[len(repo.sharedpath) + 1:]
1088 path = path[len(repo.sharedpath) + 1:]
1083 if repo._currentlock(repo._lockref) is None:
1089 if repo._currentlock(repo._lockref) is None:
1084 repo.ui.develwarn('write with no lock: "%s"' % path,
1090 repo.ui.develwarn('write with no lock: "%s"' % path,
1085 stacklevel=4)
1091 stacklevel=4)
1086 return ret
1092 return ret
1087 return checksvfs
1093 return checksvfs
1088
1094
1089 def close(self):
1095 def close(self):
1090 self._writecaches()
1096 self._writecaches()
1091
1097
1092 def _writecaches(self):
1098 def _writecaches(self):
1093 if self._revbranchcache:
1099 if self._revbranchcache:
1094 self._revbranchcache.write()
1100 self._revbranchcache.write()
1095
1101
1096 def _restrictcapabilities(self, caps):
1102 def _restrictcapabilities(self, caps):
1097 if self.ui.configbool('experimental', 'bundle2-advertise'):
1103 if self.ui.configbool('experimental', 'bundle2-advertise'):
1098 caps = set(caps)
1104 caps = set(caps)
1099 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1105 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1100 role='client'))
1106 role='client'))
1101 caps.add('bundle2=' + urlreq.quote(capsblob))
1107 caps.add('bundle2=' + urlreq.quote(capsblob))
1102 return caps
1108 return caps
1103
1109
1104 def _writerequirements(self):
1110 def _writerequirements(self):
1105 scmutil.writerequires(self.vfs, self.requirements)
1111 scmutil.writerequires(self.vfs, self.requirements)
1106
1112
1107 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1113 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1108 # self -> auditor -> self._checknested -> self
1114 # self -> auditor -> self._checknested -> self
1109
1115
1110 @property
1116 @property
1111 def auditor(self):
1117 def auditor(self):
1112 # This is only used by context.workingctx.match in order to
1118 # This is only used by context.workingctx.match in order to
1113 # detect files in subrepos.
1119 # detect files in subrepos.
1114 return pathutil.pathauditor(self.root, callback=self._checknested)
1120 return pathutil.pathauditor(self.root, callback=self._checknested)
1115
1121
1116 @property
1122 @property
1117 def nofsauditor(self):
1123 def nofsauditor(self):
1118 # This is only used by context.basectx.match in order to detect
1124 # This is only used by context.basectx.match in order to detect
1119 # files in subrepos.
1125 # files in subrepos.
1120 return pathutil.pathauditor(self.root, callback=self._checknested,
1126 return pathutil.pathauditor(self.root, callback=self._checknested,
1121 realfs=False, cached=True)
1127 realfs=False, cached=True)
1122
1128
1123 def _checknested(self, path):
1129 def _checknested(self, path):
1124 """Determine if path is a legal nested repository."""
1130 """Determine if path is a legal nested repository."""
1125 if not path.startswith(self.root):
1131 if not path.startswith(self.root):
1126 return False
1132 return False
1127 subpath = path[len(self.root) + 1:]
1133 subpath = path[len(self.root) + 1:]
1128 normsubpath = util.pconvert(subpath)
1134 normsubpath = util.pconvert(subpath)
1129
1135
1130 # XXX: Checking against the current working copy is wrong in
1136 # XXX: Checking against the current working copy is wrong in
1131 # the sense that it can reject things like
1137 # the sense that it can reject things like
1132 #
1138 #
1133 # $ hg cat -r 10 sub/x.txt
1139 # $ hg cat -r 10 sub/x.txt
1134 #
1140 #
1135 # if sub/ is no longer a subrepository in the working copy
1141 # if sub/ is no longer a subrepository in the working copy
1136 # parent revision.
1142 # parent revision.
1137 #
1143 #
1138 # However, it can of course also allow things that would have
1144 # However, it can of course also allow things that would have
1139 # been rejected before, such as the above cat command if sub/
1145 # been rejected before, such as the above cat command if sub/
1140 # is a subrepository now, but was a normal directory before.
1146 # is a subrepository now, but was a normal directory before.
1141 # The old path auditor would have rejected by mistake since it
1147 # The old path auditor would have rejected by mistake since it
1142 # panics when it sees sub/.hg/.
1148 # panics when it sees sub/.hg/.
1143 #
1149 #
1144 # All in all, checking against the working copy seems sensible
1150 # All in all, checking against the working copy seems sensible
1145 # since we want to prevent access to nested repositories on
1151 # since we want to prevent access to nested repositories on
1146 # the filesystem *now*.
1152 # the filesystem *now*.
1147 ctx = self[None]
1153 ctx = self[None]
1148 parts = util.splitpath(subpath)
1154 parts = util.splitpath(subpath)
1149 while parts:
1155 while parts:
1150 prefix = '/'.join(parts)
1156 prefix = '/'.join(parts)
1151 if prefix in ctx.substate:
1157 if prefix in ctx.substate:
1152 if prefix == normsubpath:
1158 if prefix == normsubpath:
1153 return True
1159 return True
1154 else:
1160 else:
1155 sub = ctx.sub(prefix)
1161 sub = ctx.sub(prefix)
1156 return sub.checknested(subpath[len(prefix) + 1:])
1162 return sub.checknested(subpath[len(prefix) + 1:])
1157 else:
1163 else:
1158 parts.pop()
1164 parts.pop()
1159 return False
1165 return False
1160
1166
1161 def peer(self):
1167 def peer(self):
1162 return localpeer(self) # not cached to avoid reference cycle
1168 return localpeer(self) # not cached to avoid reference cycle
1163
1169
1164 def unfiltered(self):
1170 def unfiltered(self):
1165 """Return unfiltered version of the repository
1171 """Return unfiltered version of the repository
1166
1172
1167 Intended to be overwritten by filtered repo."""
1173 Intended to be overwritten by filtered repo."""
1168 return self
1174 return self
1169
1175
1170 def filtered(self, name, visibilityexceptions=None):
1176 def filtered(self, name, visibilityexceptions=None):
1171 """Return a filtered version of a repository"""
1177 """Return a filtered version of a repository"""
1172 cls = repoview.newtype(self.unfiltered().__class__)
1178 cls = repoview.newtype(self.unfiltered().__class__)
1173 return cls(self, name, visibilityexceptions)
1179 return cls(self, name, visibilityexceptions)
1174
1180
1175 @repofilecache('bookmarks', 'bookmarks.current')
1181 @repofilecache('bookmarks', 'bookmarks.current')
1176 def _bookmarks(self):
1182 def _bookmarks(self):
1177 return bookmarks.bmstore(self)
1183 return bookmarks.bmstore(self)
1178
1184
1179 @property
1185 @property
1180 def _activebookmark(self):
1186 def _activebookmark(self):
1181 return self._bookmarks.active
1187 return self._bookmarks.active
1182
1188
1183 # _phasesets depend on changelog. what we need is to call
1189 # _phasesets depend on changelog. what we need is to call
1184 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1190 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1185 # can't be easily expressed in filecache mechanism.
1191 # can't be easily expressed in filecache mechanism.
1186 @storecache('phaseroots', '00changelog.i')
1192 @storecache('phaseroots', '00changelog.i')
1187 def _phasecache(self):
1193 def _phasecache(self):
1188 return phases.phasecache(self, self._phasedefaults)
1194 return phases.phasecache(self, self._phasedefaults)
1189
1195
1190 @storecache('obsstore')
1196 @storecache('obsstore')
1191 def obsstore(self):
1197 def obsstore(self):
1192 return obsolete.makestore(self.ui, self)
1198 return obsolete.makestore(self.ui, self)
1193
1199
1194 @storecache('00changelog.i')
1200 @storecache('00changelog.i')
1195 def changelog(self):
1201 def changelog(self):
1196 return changelog.changelog(self.svfs,
1202 return changelog.changelog(self.svfs,
1197 trypending=txnutil.mayhavepending(self.root))
1203 trypending=txnutil.mayhavepending(self.root))
1198
1204
1199 @storecache('00manifest.i')
1205 @storecache('00manifest.i')
1200 def manifestlog(self):
1206 def manifestlog(self):
1201 rootstore = manifest.manifestrevlog(self.svfs)
1207 rootstore = manifest.manifestrevlog(self.svfs)
1202 return manifest.manifestlog(self.svfs, self, rootstore,
1208 return manifest.manifestlog(self.svfs, self, rootstore,
1203 self._storenarrowmatch)
1209 self._storenarrowmatch)
1204
1210
1205 @repofilecache('dirstate')
1211 @repofilecache('dirstate')
1206 def dirstate(self):
1212 def dirstate(self):
1207 return self._makedirstate()
1213 return self._makedirstate()
1208
1214
1209 def _makedirstate(self):
1215 def _makedirstate(self):
1210 """Extension point for wrapping the dirstate per-repo."""
1216 """Extension point for wrapping the dirstate per-repo."""
1211 sparsematchfn = lambda: sparse.matcher(self)
1217 sparsematchfn = lambda: sparse.matcher(self)
1212
1218
1213 return dirstate.dirstate(self.vfs, self.ui, self.root,
1219 return dirstate.dirstate(self.vfs, self.ui, self.root,
1214 self._dirstatevalidate, sparsematchfn)
1220 self._dirstatevalidate, sparsematchfn)
1215
1221
1216 def _dirstatevalidate(self, node):
1222 def _dirstatevalidate(self, node):
1217 try:
1223 try:
1218 self.changelog.rev(node)
1224 self.changelog.rev(node)
1219 return node
1225 return node
1220 except error.LookupError:
1226 except error.LookupError:
1221 if not self._dirstatevalidatewarned:
1227 if not self._dirstatevalidatewarned:
1222 self._dirstatevalidatewarned = True
1228 self._dirstatevalidatewarned = True
1223 self.ui.warn(_("warning: ignoring unknown"
1229 self.ui.warn(_("warning: ignoring unknown"
1224 " working parent %s!\n") % short(node))
1230 " working parent %s!\n") % short(node))
1225 return nullid
1231 return nullid
1226
1232
1227 @storecache(narrowspec.FILENAME)
1233 @storecache(narrowspec.FILENAME)
1228 def narrowpats(self):
1234 def narrowpats(self):
1229 """matcher patterns for this repository's narrowspec
1235 """matcher patterns for this repository's narrowspec
1230
1236
1231 A tuple of (includes, excludes).
1237 A tuple of (includes, excludes).
1232 """
1238 """
1233 return narrowspec.load(self)
1239 return narrowspec.load(self)
1234
1240
1235 @storecache(narrowspec.FILENAME)
1241 @storecache(narrowspec.FILENAME)
1236 def _storenarrowmatch(self):
1242 def _storenarrowmatch(self):
1237 if repository.NARROW_REQUIREMENT not in self.requirements:
1243 if repository.NARROW_REQUIREMENT not in self.requirements:
1238 return matchmod.always()
1244 return matchmod.always()
1239 include, exclude = self.narrowpats
1245 include, exclude = self.narrowpats
1240 return narrowspec.match(self.root, include=include, exclude=exclude)
1246 return narrowspec.match(self.root, include=include, exclude=exclude)
1241
1247
1242 @storecache(narrowspec.FILENAME)
1248 @storecache(narrowspec.FILENAME)
1243 def _narrowmatch(self):
1249 def _narrowmatch(self):
1244 if repository.NARROW_REQUIREMENT not in self.requirements:
1250 if repository.NARROW_REQUIREMENT not in self.requirements:
1245 return matchmod.always()
1251 return matchmod.always()
1246 narrowspec.checkworkingcopynarrowspec(self)
1252 narrowspec.checkworkingcopynarrowspec(self)
1247 include, exclude = self.narrowpats
1253 include, exclude = self.narrowpats
1248 return narrowspec.match(self.root, include=include, exclude=exclude)
1254 return narrowspec.match(self.root, include=include, exclude=exclude)
1249
1255
1250 def narrowmatch(self, match=None, includeexact=False):
1256 def narrowmatch(self, match=None, includeexact=False):
1251 """matcher corresponding the the repo's narrowspec
1257 """matcher corresponding the the repo's narrowspec
1252
1258
1253 If `match` is given, then that will be intersected with the narrow
1259 If `match` is given, then that will be intersected with the narrow
1254 matcher.
1260 matcher.
1255
1261
1256 If `includeexact` is True, then any exact matches from `match` will
1262 If `includeexact` is True, then any exact matches from `match` will
1257 be included even if they're outside the narrowspec.
1263 be included even if they're outside the narrowspec.
1258 """
1264 """
1259 if match:
1265 if match:
1260 if includeexact and not self._narrowmatch.always():
1266 if includeexact and not self._narrowmatch.always():
1261 # do not exclude explicitly-specified paths so that they can
1267 # do not exclude explicitly-specified paths so that they can
1262 # be warned later on
1268 # be warned later on
1263 em = matchmod.exact(match.files())
1269 em = matchmod.exact(match.files())
1264 nm = matchmod.unionmatcher([self._narrowmatch, em])
1270 nm = matchmod.unionmatcher([self._narrowmatch, em])
1265 return matchmod.intersectmatchers(match, nm)
1271 return matchmod.intersectmatchers(match, nm)
1266 return matchmod.intersectmatchers(match, self._narrowmatch)
1272 return matchmod.intersectmatchers(match, self._narrowmatch)
1267 return self._narrowmatch
1273 return self._narrowmatch
1268
1274
1269 def setnarrowpats(self, newincludes, newexcludes):
1275 def setnarrowpats(self, newincludes, newexcludes):
1270 narrowspec.save(self, newincludes, newexcludes)
1276 narrowspec.save(self, newincludes, newexcludes)
1271 self.invalidate(clearfilecache=True)
1277 self.invalidate(clearfilecache=True)
1272
1278
    def __getitem__(self, changeid):
        """Return a context object for ``changeid``.

        ``changeid`` may be:
          - None: the working directory context
          - an existing ``basectx``: returned unchanged
          - a slice of revision numbers: a list of changectx objects,
            skipping filtered revisions
          - an integer revision number
          - the symbolic names 'null', 'tip' or '.'
          - a 20-byte binary node or a 40-byte hex node string

        Raises RepoLookupError when the changeid cannot be resolved,
        FilteredRepoLookupError when it resolves to a revision hidden by the
        current filter, and ProgrammingError for unsupported types.
        """
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                # 20 bytes: a binary node
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                # 40 characters: a hex node
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                        "unsupported changeid '%s' of type %s" %
                        (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            # the revision exists but is hidden by the current repoview
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _("unknown revision '%s'") % pycompat.bytestr(changeid))
        except error.WdirUnsupported:
            return context.workingctx(self)
1335
1341
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        specified.
        """
        try:
            # delegate resolution to __getitem__; anything it can resolve
            # counts as contained
            self[changeid]
            return True
        except error.RepoLookupError:
            return False
1347
1353
    def __nonzero__(self):
        # a repository object is always truthy, even when it has no
        # changesets; without this, boolean tests would fall back to
        # __len__ and touch the changelog
        return True

    __bool__ = __nonzero__  # Python 3 spelling of __nonzero__
1352
1358
1353 def __len__(self):
1359 def __len__(self):
1354 # no need to pay the cost of repoview.changelog
1360 # no need to pay the cost of repoview.changelog
1355 unfi = self.unfiltered()
1361 unfi = self.unfiltered()
1356 return len(unfi.changelog)
1362 return len(unfi.changelog)
1357
1363
    def __iter__(self):
        # iterate revision numbers via the (possibly filtered) changelog
        return iter(self.changelog)
1360
1366
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        # parse the spec into a tree, then evaluate the compiled matcher
        # against this repository
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
1376
1382
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        # NOTE: this must stay a generator function (lazy evaluation);
        # each yielded value is the context for one matching revision
        for r in self.revs(expr, *args):
            yield self[r]
1388
1394
1389 def anyrevs(self, specs, user=False, localalias=None):
1395 def anyrevs(self, specs, user=False, localalias=None):
1390 '''Find revisions matching one of the given revsets.
1396 '''Find revisions matching one of the given revsets.
1391
1397
1392 Revset aliases from the configuration are not expanded by default. To
1398 Revset aliases from the configuration are not expanded by default. To
1393 expand user aliases, specify ``user=True``. To provide some local
1399 expand user aliases, specify ``user=True``. To provide some local
1394 definitions overriding user aliases, set ``localalias`` to
1400 definitions overriding user aliases, set ``localalias`` to
1395 ``{name: definitionstring}``.
1401 ``{name: definitionstring}``.
1396 '''
1402 '''
1397 if user:
1403 if user:
1398 m = revset.matchany(self.ui, specs,
1404 m = revset.matchany(self.ui, specs,
1399 lookup=revset.lookupfn(self),
1405 lookup=revset.lookupfn(self),
1400 localalias=localalias)
1406 localalias=localalias)
1401 else:
1407 else:
1402 m = revset.matchany(None, specs, localalias=localalias)
1408 m = revset.matchany(None, specs, localalias=localalias)
1403 return m(self)
1409 return m(self)
1404
1410
    def url(self):
        # local repositories are addressed with the 'file:' scheme
        return 'file:' + self.root
1407
1413
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        # ``args`` are forwarded to the hook environment unchanged
        return hook.hook(self.ui, self, name, throw, **args)
1416
1422
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # Filled in lazily by tagslist() and nodetags().
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
1439
1445
1440 def tags(self):
1446 def tags(self):
1441 '''return a mapping of tag to node'''
1447 '''return a mapping of tag to node'''
1442 t = {}
1448 t = {}
1443 if self.changelog.filteredrevs:
1449 if self.changelog.filteredrevs:
1444 tags, tt = self._findtags()
1450 tags, tt = self._findtags()
1445 else:
1451 else:
1446 tags = self._tagscache.tags
1452 tags = self._tagscache.tags
1447 rev = self.changelog.rev
1453 rev = self.changelog.rev
1448 for k, v in tags.iteritems():
1454 for k, v in tags.iteritems():
1449 try:
1455 try:
1450 # ignore tags to unknown nodes
1456 # ignore tags to unknown nodes
1451 rev(v)
1457 rev(v)
1452 t[k] = v
1458 t[k] = v
1453 except (error.LookupError, ValueError):
1459 except (error.LookupError, ValueError):
1454 pass
1460 pass
1455 return t
1461 return t
1456
1462
1457 def _findtags(self):
1463 def _findtags(self):
1458 '''Do the hard work of finding tags. Return a pair of dicts
1464 '''Do the hard work of finding tags. Return a pair of dicts
1459 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1465 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1460 maps tag name to a string like \'global\' or \'local\'.
1466 maps tag name to a string like \'global\' or \'local\'.
1461 Subclasses or extensions are free to add their own tags, but
1467 Subclasses or extensions are free to add their own tags, but
1462 should be aware that the returned dicts will be retained for the
1468 should be aware that the returned dicts will be retained for the
1463 duration of the localrepo object.'''
1469 duration of the localrepo object.'''
1464
1470
1465 # XXX what tagtype should subclasses/extensions use? Currently
1471 # XXX what tagtype should subclasses/extensions use? Currently
1466 # mq and bookmarks add tags, but do not set the tagtype at all.
1472 # mq and bookmarks add tags, but do not set the tagtype at all.
1467 # Should each extension invent its own tag type? Should there
1473 # Should each extension invent its own tag type? Should there
1468 # be one tagtype for all such "virtual" tags? Or is the status
1474 # be one tagtype for all such "virtual" tags? Or is the status
1469 # quo fine?
1475 # quo fine?
1470
1476
1471
1477
1472 # map tag name to (node, hist)
1478 # map tag name to (node, hist)
1473 alltags = tagsmod.findglobaltags(self.ui, self)
1479 alltags = tagsmod.findglobaltags(self.ui, self)
1474 # map tag name to tag type
1480 # map tag name to tag type
1475 tagtypes = dict((tag, 'global') for tag in alltags)
1481 tagtypes = dict((tag, 'global') for tag in alltags)
1476
1482
1477 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1483 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1478
1484
1479 # Build the return dicts. Have to re-encode tag names because
1485 # Build the return dicts. Have to re-encode tag names because
1480 # the tags module always uses UTF-8 (in order not to lose info
1486 # the tags module always uses UTF-8 (in order not to lose info
1481 # writing to the cache), but the rest of Mercurial wants them in
1487 # writing to the cache), but the rest of Mercurial wants them in
1482 # local encoding.
1488 # local encoding.
1483 tags = {}
1489 tags = {}
1484 for (name, (node, hist)) in alltags.iteritems():
1490 for (name, (node, hist)) in alltags.iteritems():
1485 if node != nullid:
1491 if node != nullid:
1486 tags[encoding.tolocal(name)] = node
1492 tags[encoding.tolocal(name)] = node
1487 tags['tip'] = self.changelog.tip()
1493 tags['tip'] = self.changelog.tip()
1488 tagtypes = dict([(encoding.tolocal(name), value)
1494 tagtypes = dict([(encoding.tolocal(name), value)
1489 for (name, value) in tagtypes.iteritems()])
1495 for (name, value) in tagtypes.iteritems()])
1490 return (tags, tagtypes)
1496 return (tags, tagtypes)
1491
1497
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local' : a local tag
        'global' : a global tag
        None : tag does not exist
        '''

        # tagtypes is populated lazily through the _tagscache property
        return self._tagscache.tagtypes.get(tagname)
1502
1508
1503 def tagslist(self):
1509 def tagslist(self):
1504 '''return a list of tags ordered by revision'''
1510 '''return a list of tags ordered by revision'''
1505 if not self._tagscache.tagslist:
1511 if not self._tagscache.tagslist:
1506 l = []
1512 l = []
1507 for t, n in self.tags().iteritems():
1513 for t, n in self.tags().iteritems():
1508 l.append((self.changelog.rev(n), t, n))
1514 l.append((self.changelog.rev(n), t, n))
1509 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1515 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1510
1516
1511 return self._tagscache.tagslist
1517 return self._tagscache.tagslist
1512
1518
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # lazily build the node -> sorted list of tag names mapping
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
1523
1529
    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)
1527
1533
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # NOTE(review): _branchcaches is indexed by the repo(view) object
        # itself; presumably one cached map per filter level - confirm in
        # the branchmap module
        return self._branchcaches[self]
1532
1538
    @unfilteredmethod
    def revbranchcache(self):
        # lazily create the rev -> branch cache; built on the unfiltered
        # repository so it can be shared whatever the current filter is
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
1538
1544
1539 def branchtip(self, branch, ignoremissing=False):
1545 def branchtip(self, branch, ignoremissing=False):
1540 '''return the tip node for a given branch
1546 '''return the tip node for a given branch
1541
1547
1542 If ignoremissing is True, then this method will not raise an error.
1548 If ignoremissing is True, then this method will not raise an error.
1543 This is helpful for callers that only expect None for a missing branch
1549 This is helpful for callers that only expect None for a missing branch
1544 (e.g. namespace).
1550 (e.g. namespace).
1545
1551
1546 '''
1552 '''
1547 try:
1553 try:
1548 return self.branchmap().branchtip(branch)
1554 return self.branchmap().branchtip(branch)
1549 except KeyError:
1555 except KeyError:
1550 if not ignoremissing:
1556 if not ignoremissing:
1551 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1557 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1552 else:
1558 else:
1553 pass
1559 pass
1554
1560
    def lookup(self, key):
        """Resolve the revision symbol ``key`` and return its binary node."""
        return scmutil.revsymbol(self, key).node()
1557
1563
    def lookupbranch(self, key):
        # if ``key`` names an existing branch, return it as-is; otherwise
        # resolve it as a revision symbol and return that revision's branch
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()
1563
1569
1564 def known(self, nodes):
1570 def known(self, nodes):
1565 cl = self.changelog
1571 cl = self.changelog
1566 nm = cl.nodemap
1572 nm = cl.nodemap
1567 filtered = cl.filteredrevs
1573 filtered = cl.filteredrevs
1568 result = []
1574 result = []
1569 for n in nodes:
1575 for n in nodes:
1570 r = nm.get(n)
1576 r = nm.get(n)
1571 resp = not (r is None or r in filtered)
1577 resp = not (r is None or r in filtered)
1572 result.append(resp)
1578 result.append(resp)
1573 return result
1579 return result
1574
1580
    def local(self):
        # truthy marker that this repository is local; see cancopy(), which
        # relies on non-local repository classes overriding this
        return self
1577
1583
    def publishing(self):
        """True if this repository is configured as publishing (phases)."""
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)
1582
1588
    def cancopy(self):
        """True when this repository may be safely copied as raw files."""
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs
1591
1597
1592 def shared(self):
1598 def shared(self):
1593 '''the type of shared repository (None if not shared)'''
1599 '''the type of shared repository (None if not shared)'''
1594 if self.sharedpath != self.path:
1600 if self.sharedpath != self.path:
1595 return 'store'
1601 return 'store'
1596 return None
1602 return None
1597
1603
    def wjoin(self, f, *insidef):
        """Join path component(s) onto the working directory root."""
        return self.vfs.reljoin(self.root, f, *insidef)
1600
1606
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents to ``p1`` (and optionally
        ``p2``), adjusting dirstate copy records to stay consistent with
        the new parents."""
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                # back to a single parent: drop copy records whose source
                # and destination are both unknown to the first parent
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
1616
1622
    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        # thin wrapper; all resolution logic lives in context.filectx
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)
1622
1628
    def getcwd(self):
        # delegate to the dirstate, which tracks the working directory
        return self.dirstate.getcwd()
1625
1631
    def pathto(self, f, cwd=None):
        # delegate path formatting for ``f`` to the dirstate
        return self.dirstate.pathto(f, cwd)
1628
1634
    def _loadfilter(self, filter):
        """Load, cache and return the list of ``(matcher, fn, params)``
        filter entries configured in the config section named by
        ``filter`` (e.g. 'encode' or 'decode')."""
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # registered data filter: the remainder of the
                        # command string becomes its parameters
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # no registered filter matched: run cmd as an external
                    # command over the data
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
1652
1658
1653 def _filter(self, filterpats, filename, data):
1659 def _filter(self, filterpats, filename, data):
1654 for mf, fn, cmd in filterpats:
1660 for mf, fn, cmd in filterpats:
1655 if mf(filename):
1661 if mf(filename):
1656 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1662 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1657 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1663 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1658 break
1664 break
1659
1665
1660 return data
1666 return data
1661
1667
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # filters applied to data read from the working directory (wread)
        return self._loadfilter('encode')
1665
1671
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # filters applied to data written to the working directory (wwrite)
        return self._loadfilter('decode')
1669
1675
    def adddatafilter(self, name, filter):
        """Register a named data filter; _loadfilter() matches configured
        filter commands against these names."""
        self._datafilters[name] = filter
1672
1678
1673 def wread(self, filename):
1679 def wread(self, filename):
1674 if self.wvfs.islink(filename):
1680 if self.wvfs.islink(filename):
1675 data = self.wvfs.readlink(filename)
1681 data = self.wvfs.readlink(filename)
1676 else:
1682 else:
1677 data = self.wvfs.read(filename)
1683 data = self.wvfs.read(filename)
1678 return self._filter(self._encodefilterpats, filename, data)
1684 return self._filter(self._encodefilterpats, filename, data)
1679
1685
    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns length of written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            # 'l' flag: create a symlink whose target is ``data``
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                # 'x' flag: mark the file executable
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
1696
1702
    def wwritedata(self, filename, data):
        """Return ``data`` run through the 'decode' filters for
        ``filename`` without writing anything to disk."""
        return self._filter(self._decodefilterpats, filename, data)
1699
1705
1700 def currenttransaction(self):
1706 def currenttransaction(self):
1701 """return the current transaction or None if non exists"""
1707 """return the current transaction or None if non exists"""
1702 if self._transref:
1708 if self._transref:
1703 tr = self._transref()
1709 tr = self._transref()
1704 else:
1710 else:
1705 tr = None
1711 tr = None
1706
1712
1707 if tr and tr.running():
1713 if tr and tr.running():
1708 return tr
1714 return tr
1709 return None
1715 return None
1710
1716
1711 def transaction(self, desc, report=None):
1717 def transaction(self, desc, report=None):
1712 if (self.ui.configbool('devel', 'all-warnings')
1718 if (self.ui.configbool('devel', 'all-warnings')
1713 or self.ui.configbool('devel', 'check-locks')):
1719 or self.ui.configbool('devel', 'check-locks')):
1714 if self._currentlock(self._lockref) is None:
1720 if self._currentlock(self._lockref) is None:
1715 raise error.ProgrammingError('transaction requires locking')
1721 raise error.ProgrammingError('transaction requires locking')
1716 tr = self.currenttransaction()
1722 tr = self.currenttransaction()
1717 if tr is not None:
1723 if tr is not None:
1718 return tr.nest(name=desc)
1724 return tr.nest(name=desc)
1719
1725
1720 # abort here if the journal already exists
1726 # abort here if the journal already exists
1721 if self.svfs.exists("journal"):
1727 if self.svfs.exists("journal"):
1722 raise error.RepoError(
1728 raise error.RepoError(
1723 _("abandoned transaction found"),
1729 _("abandoned transaction found"),
1724 hint=_("run 'hg recover' to clean up transaction"))
1730 hint=_("run 'hg recover' to clean up transaction"))
1725
1731
1726 idbase = "%.40f#%f" % (random.random(), time.time())
1732 idbase = "%.40f#%f" % (random.random(), time.time())
1727 ha = hex(hashlib.sha1(idbase).digest())
1733 ha = hex(hashlib.sha1(idbase).digest())
1728 txnid = 'TXN:' + ha
1734 txnid = 'TXN:' + ha
1729 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1735 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1730
1736
1731 self._writejournal(desc)
1737 self._writejournal(desc)
1732 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1738 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1733 if report:
1739 if report:
1734 rp = report
1740 rp = report
1735 else:
1741 else:
1736 rp = self.ui.warn
1742 rp = self.ui.warn
1737 vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
1743 vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
1738 # we must avoid cyclic reference between repo and transaction.
1744 # we must avoid cyclic reference between repo and transaction.
1739 reporef = weakref.ref(self)
1745 reporef = weakref.ref(self)
1740 # Code to track tag movement
1746 # Code to track tag movement
1741 #
1747 #
1742 # Since tags are all handled as file content, it is actually quite hard
1748 # Since tags are all handled as file content, it is actually quite hard
1743 # to track these movement from a code perspective. So we fallback to a
1749 # to track these movement from a code perspective. So we fallback to a
1744 # tracking at the repository level. One could envision to track changes
1750 # tracking at the repository level. One could envision to track changes
1745 # to the '.hgtags' file through changegroup apply but that fails to
1751 # to the '.hgtags' file through changegroup apply but that fails to
1746 # cope with case where transaction expose new heads without changegroup
1752 # cope with case where transaction expose new heads without changegroup
1747 # being involved (eg: phase movement).
1753 # being involved (eg: phase movement).
1748 #
1754 #
1749 # For now, We gate the feature behind a flag since this likely comes
1755 # For now, We gate the feature behind a flag since this likely comes
1750 # with performance impacts. The current code run more often than needed
1756 # with performance impacts. The current code run more often than needed
1751 # and do not use caches as much as it could. The current focus is on
1757 # and do not use caches as much as it could. The current focus is on
1752 # the behavior of the feature so we disable it by default. The flag
1758 # the behavior of the feature so we disable it by default. The flag
1753 # will be removed when we are happy with the performance impact.
1759 # will be removed when we are happy with the performance impact.
1754 #
1760 #
1755 # Once this feature is no longer experimental move the following
1761 # Once this feature is no longer experimental move the following
1756 # documentation to the appropriate help section:
1762 # documentation to the appropriate help section:
1757 #
1763 #
1758 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1764 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1759 # tags (new or changed or deleted tags). In addition the details of
1765 # tags (new or changed or deleted tags). In addition the details of
1760 # these changes are made available in a file at:
1766 # these changes are made available in a file at:
1761 # ``REPOROOT/.hg/changes/tags.changes``.
1767 # ``REPOROOT/.hg/changes/tags.changes``.
1762 # Make sure you check for HG_TAG_MOVED before reading that file as it
1768 # Make sure you check for HG_TAG_MOVED before reading that file as it
1763 # might exist from a previous transaction even if no tag were touched
1769 # might exist from a previous transaction even if no tag were touched
1764 # in this one. Changes are recorded in a line base format::
1770 # in this one. Changes are recorded in a line base format::
1765 #
1771 #
1766 # <action> <hex-node> <tag-name>\n
1772 # <action> <hex-node> <tag-name>\n
1767 #
1773 #
1768 # Actions are defined as follow:
1774 # Actions are defined as follow:
1769 # "-R": tag is removed,
1775 # "-R": tag is removed,
1770 # "+A": tag is added,
1776 # "+A": tag is added,
1771 # "-M": tag is moved (old value),
1777 # "-M": tag is moved (old value),
1772 # "+M": tag is moved (new value),
1778 # "+M": tag is moved (new value),
1773 tracktags = lambda x: None
1779 tracktags = lambda x: None
1774 # experimental config: experimental.hook-track-tags
1780 # experimental config: experimental.hook-track-tags
1775 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1781 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1776 if desc != 'strip' and shouldtracktags:
1782 if desc != 'strip' and shouldtracktags:
1777 oldheads = self.changelog.headrevs()
1783 oldheads = self.changelog.headrevs()
1778 def tracktags(tr2):
1784 def tracktags(tr2):
1779 repo = reporef()
1785 repo = reporef()
1780 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1786 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1781 newheads = repo.changelog.headrevs()
1787 newheads = repo.changelog.headrevs()
1782 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1788 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1783 # notes: we compare lists here.
1789 # notes: we compare lists here.
1784 # As we do it only once buiding set would not be cheaper
1790 # As we do it only once buiding set would not be cheaper
1785 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1791 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1786 if changes:
1792 if changes:
1787 tr2.hookargs['tag_moved'] = '1'
1793 tr2.hookargs['tag_moved'] = '1'
1788 with repo.vfs('changes/tags.changes', 'w',
1794 with repo.vfs('changes/tags.changes', 'w',
1789 atomictemp=True) as changesfile:
1795 atomictemp=True) as changesfile:
1790 # note: we do not register the file to the transaction
1796 # note: we do not register the file to the transaction
1791 # because we needs it to still exist on the transaction
1797 # because we needs it to still exist on the transaction
1792 # is close (for txnclose hooks)
1798 # is close (for txnclose hooks)
1793 tagsmod.writediff(changesfile, changes)
1799 tagsmod.writediff(changesfile, changes)
1794 def validate(tr2):
1800 def validate(tr2):
1795 """will run pre-closing hooks"""
1801 """will run pre-closing hooks"""
1796 # XXX the transaction API is a bit lacking here so we take a hacky
1802 # XXX the transaction API is a bit lacking here so we take a hacky
1797 # path for now
1803 # path for now
1798 #
1804 #
1799 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1805 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1800 # dict is copied before these run. In addition we needs the data
1806 # dict is copied before these run. In addition we needs the data
1801 # available to in memory hooks too.
1807 # available to in memory hooks too.
1802 #
1808 #
1803 # Moreover, we also need to make sure this runs before txnclose
1809 # Moreover, we also need to make sure this runs before txnclose
1804 # hooks and there is no "pending" mechanism that would execute
1810 # hooks and there is no "pending" mechanism that would execute
1805 # logic only if hooks are about to run.
1811 # logic only if hooks are about to run.
1806 #
1812 #
1807 # Fixing this limitation of the transaction is also needed to track
1813 # Fixing this limitation of the transaction is also needed to track
1808 # other families of changes (bookmarks, phases, obsolescence).
1814 # other families of changes (bookmarks, phases, obsolescence).
1809 #
1815 #
1810 # This will have to be fixed before we remove the experimental
1816 # This will have to be fixed before we remove the experimental
1811 # gating.
1817 # gating.
1812 tracktags(tr2)
1818 tracktags(tr2)
1813 repo = reporef()
1819 repo = reporef()
1814 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1820 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1815 scmutil.enforcesinglehead(repo, tr2, desc)
1821 scmutil.enforcesinglehead(repo, tr2, desc)
1816 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1822 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1817 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1823 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1818 args = tr.hookargs.copy()
1824 args = tr.hookargs.copy()
1819 args.update(bookmarks.preparehookargs(name, old, new))
1825 args.update(bookmarks.preparehookargs(name, old, new))
1820 repo.hook('pretxnclose-bookmark', throw=True,
1826 repo.hook('pretxnclose-bookmark', throw=True,
1821 **pycompat.strkwargs(args))
1827 **pycompat.strkwargs(args))
1822 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1828 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1823 cl = repo.unfiltered().changelog
1829 cl = repo.unfiltered().changelog
1824 for rev, (old, new) in tr.changes['phases'].items():
1830 for rev, (old, new) in tr.changes['phases'].items():
1825 args = tr.hookargs.copy()
1831 args = tr.hookargs.copy()
1826 node = hex(cl.node(rev))
1832 node = hex(cl.node(rev))
1827 args.update(phases.preparehookargs(node, old, new))
1833 args.update(phases.preparehookargs(node, old, new))
1828 repo.hook('pretxnclose-phase', throw=True,
1834 repo.hook('pretxnclose-phase', throw=True,
1829 **pycompat.strkwargs(args))
1835 **pycompat.strkwargs(args))
1830
1836
1831 repo.hook('pretxnclose', throw=True,
1837 repo.hook('pretxnclose', throw=True,
1832 **pycompat.strkwargs(tr.hookargs))
1838 **pycompat.strkwargs(tr.hookargs))
1833 def releasefn(tr, success):
1839 def releasefn(tr, success):
1834 repo = reporef()
1840 repo = reporef()
1835 if success:
1841 if success:
1836 # this should be explicitly invoked here, because
1842 # this should be explicitly invoked here, because
1837 # in-memory changes aren't written out at closing
1843 # in-memory changes aren't written out at closing
1838 # transaction, if tr.addfilegenerator (via
1844 # transaction, if tr.addfilegenerator (via
1839 # dirstate.write or so) isn't invoked while
1845 # dirstate.write or so) isn't invoked while
1840 # transaction running
1846 # transaction running
1841 repo.dirstate.write(None)
1847 repo.dirstate.write(None)
1842 else:
1848 else:
1843 # discard all changes (including ones already written
1849 # discard all changes (including ones already written
1844 # out) in this transaction
1850 # out) in this transaction
1845 narrowspec.restorebackup(self, 'journal.narrowspec')
1851 narrowspec.restorebackup(self, 'journal.narrowspec')
1846 narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
1852 narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
1847 repo.dirstate.restorebackup(None, 'journal.dirstate')
1853 repo.dirstate.restorebackup(None, 'journal.dirstate')
1848
1854
1849 repo.invalidate(clearfilecache=True)
1855 repo.invalidate(clearfilecache=True)
1850
1856
1851 tr = transaction.transaction(rp, self.svfs, vfsmap,
1857 tr = transaction.transaction(rp, self.svfs, vfsmap,
1852 "journal",
1858 "journal",
1853 "undo",
1859 "undo",
1854 aftertrans(renames),
1860 aftertrans(renames),
1855 self.store.createmode,
1861 self.store.createmode,
1856 validator=validate,
1862 validator=validate,
1857 releasefn=releasefn,
1863 releasefn=releasefn,
1858 checkambigfiles=_cachedfiles,
1864 checkambigfiles=_cachedfiles,
1859 name=desc)
1865 name=desc)
1860 tr.changes['origrepolen'] = len(self)
1866 tr.changes['origrepolen'] = len(self)
1861 tr.changes['obsmarkers'] = set()
1867 tr.changes['obsmarkers'] = set()
1862 tr.changes['phases'] = {}
1868 tr.changes['phases'] = {}
1863 tr.changes['bookmarks'] = {}
1869 tr.changes['bookmarks'] = {}
1864
1870
1865 tr.hookargs['txnid'] = txnid
1871 tr.hookargs['txnid'] = txnid
1866 tr.hookargs['txnname'] = desc
1872 tr.hookargs['txnname'] = desc
1867 # note: writing the fncache only during finalize mean that the file is
1873 # note: writing the fncache only during finalize mean that the file is
1868 # outdated when running hooks. As fncache is used for streaming clone,
1874 # outdated when running hooks. As fncache is used for streaming clone,
1869 # this is not expected to break anything that happen during the hooks.
1875 # this is not expected to break anything that happen during the hooks.
1870 tr.addfinalize('flush-fncache', self.store.write)
1876 tr.addfinalize('flush-fncache', self.store.write)
1871 def txnclosehook(tr2):
1877 def txnclosehook(tr2):
1872 """To be run if transaction is successful, will schedule a hook run
1878 """To be run if transaction is successful, will schedule a hook run
1873 """
1879 """
1874 # Don't reference tr2 in hook() so we don't hold a reference.
1880 # Don't reference tr2 in hook() so we don't hold a reference.
1875 # This reduces memory consumption when there are multiple
1881 # This reduces memory consumption when there are multiple
1876 # transactions per lock. This can likely go away if issue5045
1882 # transactions per lock. This can likely go away if issue5045
1877 # fixes the function accumulation.
1883 # fixes the function accumulation.
1878 hookargs = tr2.hookargs
1884 hookargs = tr2.hookargs
1879
1885
1880 def hookfunc():
1886 def hookfunc():
1881 repo = reporef()
1887 repo = reporef()
1882 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1888 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1883 bmchanges = sorted(tr.changes['bookmarks'].items())
1889 bmchanges = sorted(tr.changes['bookmarks'].items())
1884 for name, (old, new) in bmchanges:
1890 for name, (old, new) in bmchanges:
1885 args = tr.hookargs.copy()
1891 args = tr.hookargs.copy()
1886 args.update(bookmarks.preparehookargs(name, old, new))
1892 args.update(bookmarks.preparehookargs(name, old, new))
1887 repo.hook('txnclose-bookmark', throw=False,
1893 repo.hook('txnclose-bookmark', throw=False,
1888 **pycompat.strkwargs(args))
1894 **pycompat.strkwargs(args))
1889
1895
1890 if hook.hashook(repo.ui, 'txnclose-phase'):
1896 if hook.hashook(repo.ui, 'txnclose-phase'):
1891 cl = repo.unfiltered().changelog
1897 cl = repo.unfiltered().changelog
1892 phasemv = sorted(tr.changes['phases'].items())
1898 phasemv = sorted(tr.changes['phases'].items())
1893 for rev, (old, new) in phasemv:
1899 for rev, (old, new) in phasemv:
1894 args = tr.hookargs.copy()
1900 args = tr.hookargs.copy()
1895 node = hex(cl.node(rev))
1901 node = hex(cl.node(rev))
1896 args.update(phases.preparehookargs(node, old, new))
1902 args.update(phases.preparehookargs(node, old, new))
1897 repo.hook('txnclose-phase', throw=False,
1903 repo.hook('txnclose-phase', throw=False,
1898 **pycompat.strkwargs(args))
1904 **pycompat.strkwargs(args))
1899
1905
1900 repo.hook('txnclose', throw=False,
1906 repo.hook('txnclose', throw=False,
1901 **pycompat.strkwargs(hookargs))
1907 **pycompat.strkwargs(hookargs))
1902 reporef()._afterlock(hookfunc)
1908 reporef()._afterlock(hookfunc)
1903 tr.addfinalize('txnclose-hook', txnclosehook)
1909 tr.addfinalize('txnclose-hook', txnclosehook)
1904 # Include a leading "-" to make it happen before the transaction summary
1910 # Include a leading "-" to make it happen before the transaction summary
1905 # reports registered via scmutil.registersummarycallback() whose names
1911 # reports registered via scmutil.registersummarycallback() whose names
1906 # are 00-txnreport etc. That way, the caches will be warm when the
1912 # are 00-txnreport etc. That way, the caches will be warm when the
1907 # callbacks run.
1913 # callbacks run.
1908 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1914 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1909 def txnaborthook(tr2):
1915 def txnaborthook(tr2):
1910 """To be run if transaction is aborted
1916 """To be run if transaction is aborted
1911 """
1917 """
1912 reporef().hook('txnabort', throw=False,
1918 reporef().hook('txnabort', throw=False,
1913 **pycompat.strkwargs(tr2.hookargs))
1919 **pycompat.strkwargs(tr2.hookargs))
1914 tr.addabort('txnabort-hook', txnaborthook)
1920 tr.addabort('txnabort-hook', txnaborthook)
1915 # avoid eager cache invalidation. in-memory data should be identical
1921 # avoid eager cache invalidation. in-memory data should be identical
1916 # to stored data if transaction has no error.
1922 # to stored data if transaction has no error.
1917 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1923 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1918 self._transref = weakref.ref(tr)
1924 self._transref = weakref.ref(tr)
1919 scmutil.registersummarycallback(self, tr, desc)
1925 scmutil.registersummarycallback(self, tr, desc)
1920 return tr
1926 return tr
1921
1927
1922 def _journalfiles(self):
1928 def _journalfiles(self):
1923 return ((self.svfs, 'journal'),
1929 return ((self.svfs, 'journal'),
1924 (self.svfs, 'journal.narrowspec'),
1930 (self.svfs, 'journal.narrowspec'),
1925 (self.vfs, 'journal.narrowspec.dirstate'),
1931 (self.vfs, 'journal.narrowspec.dirstate'),
1926 (self.vfs, 'journal.dirstate'),
1932 (self.vfs, 'journal.dirstate'),
1927 (self.vfs, 'journal.branch'),
1933 (self.vfs, 'journal.branch'),
1928 (self.vfs, 'journal.desc'),
1934 (self.vfs, 'journal.desc'),
1929 (self.vfs, 'journal.bookmarks'),
1935 (self.vfs, 'journal.bookmarks'),
1930 (self.svfs, 'journal.phaseroots'))
1936 (self.svfs, 'journal.phaseroots'))
1931
1937
1932 def undofiles(self):
1938 def undofiles(self):
1933 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1939 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1934
1940
1935 @unfilteredmethod
1941 @unfilteredmethod
1936 def _writejournal(self, desc):
1942 def _writejournal(self, desc):
1937 self.dirstate.savebackup(None, 'journal.dirstate')
1943 self.dirstate.savebackup(None, 'journal.dirstate')
1938 narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
1944 narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
1939 narrowspec.savebackup(self, 'journal.narrowspec')
1945 narrowspec.savebackup(self, 'journal.narrowspec')
1940 self.vfs.write("journal.branch",
1946 self.vfs.write("journal.branch",
1941 encoding.fromlocal(self.dirstate.branch()))
1947 encoding.fromlocal(self.dirstate.branch()))
1942 self.vfs.write("journal.desc",
1948 self.vfs.write("journal.desc",
1943 "%d\n%s\n" % (len(self), desc))
1949 "%d\n%s\n" % (len(self), desc))
1944 self.vfs.write("journal.bookmarks",
1950 self.vfs.write("journal.bookmarks",
1945 self.vfs.tryread("bookmarks"))
1951 self.vfs.tryread("bookmarks"))
1946 self.svfs.write("journal.phaseroots",
1952 self.svfs.write("journal.phaseroots",
1947 self.svfs.tryread("phaseroots"))
1953 self.svfs.tryread("phaseroots"))
1948
1954
1949 def recover(self):
1955 def recover(self):
1950 with self.lock():
1956 with self.lock():
1951 if self.svfs.exists("journal"):
1957 if self.svfs.exists("journal"):
1952 self.ui.status(_("rolling back interrupted transaction\n"))
1958 self.ui.status(_("rolling back interrupted transaction\n"))
1953 vfsmap = {'': self.svfs,
1959 vfsmap = {'': self.svfs,
1954 'plain': self.vfs,}
1960 'plain': self.vfs,}
1955 transaction.rollback(self.svfs, vfsmap, "journal",
1961 transaction.rollback(self.svfs, vfsmap, "journal",
1956 self.ui.warn,
1962 self.ui.warn,
1957 checkambigfiles=_cachedfiles)
1963 checkambigfiles=_cachedfiles)
1958 self.invalidate()
1964 self.invalidate()
1959 return True
1965 return True
1960 else:
1966 else:
1961 self.ui.warn(_("no interrupted transaction available\n"))
1967 self.ui.warn(_("no interrupted transaction available\n"))
1962 return False
1968 return False
1963
1969
1964 def rollback(self, dryrun=False, force=False):
1970 def rollback(self, dryrun=False, force=False):
1965 wlock = lock = dsguard = None
1971 wlock = lock = dsguard = None
1966 try:
1972 try:
1967 wlock = self.wlock()
1973 wlock = self.wlock()
1968 lock = self.lock()
1974 lock = self.lock()
1969 if self.svfs.exists("undo"):
1975 if self.svfs.exists("undo"):
1970 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1976 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1971
1977
1972 return self._rollback(dryrun, force, dsguard)
1978 return self._rollback(dryrun, force, dsguard)
1973 else:
1979 else:
1974 self.ui.warn(_("no rollback information available\n"))
1980 self.ui.warn(_("no rollback information available\n"))
1975 return 1
1981 return 1
1976 finally:
1982 finally:
1977 release(dsguard, lock, wlock)
1983 release(dsguard, lock, wlock)
1978
1984
1979 @unfilteredmethod # Until we get smarter cache management
1985 @unfilteredmethod # Until we get smarter cache management
1980 def _rollback(self, dryrun, force, dsguard):
1986 def _rollback(self, dryrun, force, dsguard):
1981 ui = self.ui
1987 ui = self.ui
1982 try:
1988 try:
1983 args = self.vfs.read('undo.desc').splitlines()
1989 args = self.vfs.read('undo.desc').splitlines()
1984 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1990 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1985 if len(args) >= 3:
1991 if len(args) >= 3:
1986 detail = args[2]
1992 detail = args[2]
1987 oldtip = oldlen - 1
1993 oldtip = oldlen - 1
1988
1994
1989 if detail and ui.verbose:
1995 if detail and ui.verbose:
1990 msg = (_('repository tip rolled back to revision %d'
1996 msg = (_('repository tip rolled back to revision %d'
1991 ' (undo %s: %s)\n')
1997 ' (undo %s: %s)\n')
1992 % (oldtip, desc, detail))
1998 % (oldtip, desc, detail))
1993 else:
1999 else:
1994 msg = (_('repository tip rolled back to revision %d'
2000 msg = (_('repository tip rolled back to revision %d'
1995 ' (undo %s)\n')
2001 ' (undo %s)\n')
1996 % (oldtip, desc))
2002 % (oldtip, desc))
1997 except IOError:
2003 except IOError:
1998 msg = _('rolling back unknown transaction\n')
2004 msg = _('rolling back unknown transaction\n')
1999 desc = None
2005 desc = None
2000
2006
2001 if not force and self['.'] != self['tip'] and desc == 'commit':
2007 if not force and self['.'] != self['tip'] and desc == 'commit':
2002 raise error.Abort(
2008 raise error.Abort(
2003 _('rollback of last commit while not checked out '
2009 _('rollback of last commit while not checked out '
2004 'may lose data'), hint=_('use -f to force'))
2010 'may lose data'), hint=_('use -f to force'))
2005
2011
2006 ui.status(msg)
2012 ui.status(msg)
2007 if dryrun:
2013 if dryrun:
2008 return 0
2014 return 0
2009
2015
2010 parents = self.dirstate.parents()
2016 parents = self.dirstate.parents()
2011 self.destroying()
2017 self.destroying()
2012 vfsmap = {'plain': self.vfs, '': self.svfs}
2018 vfsmap = {'plain': self.vfs, '': self.svfs}
2013 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
2019 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
2014 checkambigfiles=_cachedfiles)
2020 checkambigfiles=_cachedfiles)
2015 if self.vfs.exists('undo.bookmarks'):
2021 if self.vfs.exists('undo.bookmarks'):
2016 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
2022 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
2017 if self.svfs.exists('undo.phaseroots'):
2023 if self.svfs.exists('undo.phaseroots'):
2018 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
2024 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
2019 self.invalidate()
2025 self.invalidate()
2020
2026
2021 parentgone = any(p not in self.changelog.nodemap for p in parents)
2027 parentgone = any(p not in self.changelog.nodemap for p in parents)
2022 if parentgone:
2028 if parentgone:
2023 # prevent dirstateguard from overwriting already restored one
2029 # prevent dirstateguard from overwriting already restored one
2024 dsguard.close()
2030 dsguard.close()
2025
2031
2026 narrowspec.restorebackup(self, 'undo.narrowspec')
2032 narrowspec.restorebackup(self, 'undo.narrowspec')
2027 narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
2033 narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
2028 self.dirstate.restorebackup(None, 'undo.dirstate')
2034 self.dirstate.restorebackup(None, 'undo.dirstate')
2029 try:
2035 try:
2030 branch = self.vfs.read('undo.branch')
2036 branch = self.vfs.read('undo.branch')
2031 self.dirstate.setbranch(encoding.tolocal(branch))
2037 self.dirstate.setbranch(encoding.tolocal(branch))
2032 except IOError:
2038 except IOError:
2033 ui.warn(_('named branch could not be reset: '
2039 ui.warn(_('named branch could not be reset: '
2034 'current branch is still \'%s\'\n')
2040 'current branch is still \'%s\'\n')
2035 % self.dirstate.branch())
2041 % self.dirstate.branch())
2036
2042
2037 parents = tuple([p.rev() for p in self[None].parents()])
2043 parents = tuple([p.rev() for p in self[None].parents()])
2038 if len(parents) > 1:
2044 if len(parents) > 1:
2039 ui.status(_('working directory now based on '
2045 ui.status(_('working directory now based on '
2040 'revisions %d and %d\n') % parents)
2046 'revisions %d and %d\n') % parents)
2041 else:
2047 else:
2042 ui.status(_('working directory now based on '
2048 ui.status(_('working directory now based on '
2043 'revision %d\n') % parents)
2049 'revision %d\n') % parents)
2044 mergemod.mergestate.clean(self, self['.'].node())
2050 mergemod.mergestate.clean(self, self['.'].node())
2045
2051
2046 # TODO: if we know which new heads may result from this rollback, pass
2052 # TODO: if we know which new heads may result from this rollback, pass
2047 # them to destroy(), which will prevent the branchhead cache from being
2053 # them to destroy(), which will prevent the branchhead cache from being
2048 # invalidated.
2054 # invalidated.
2049 self.destroyed()
2055 self.destroyed()
2050 return 0
2056 return 0
2051
2057
2052 def _buildcacheupdater(self, newtransaction):
2058 def _buildcacheupdater(self, newtransaction):
2053 """called during transaction to build the callback updating cache
2059 """called during transaction to build the callback updating cache
2054
2060
2055 Lives on the repository to help extension who might want to augment
2061 Lives on the repository to help extension who might want to augment
2056 this logic. For this purpose, the created transaction is passed to the
2062 this logic. For this purpose, the created transaction is passed to the
2057 method.
2063 method.
2058 """
2064 """
2059 # we must avoid cyclic reference between repo and transaction.
2065 # we must avoid cyclic reference between repo and transaction.
2060 reporef = weakref.ref(self)
2066 reporef = weakref.ref(self)
2061 def updater(tr):
2067 def updater(tr):
2062 repo = reporef()
2068 repo = reporef()
2063 repo.updatecaches(tr)
2069 repo.updatecaches(tr)
2064 return updater
2070 return updater
2065
2071
2066 @unfilteredmethod
2072 @unfilteredmethod
2067 def updatecaches(self, tr=None, full=False):
2073 def updatecaches(self, tr=None, full=False):
2068 """warm appropriate caches
2074 """warm appropriate caches
2069
2075
2070 If this function is called after a transaction closed. The transaction
2076 If this function is called after a transaction closed. The transaction
2071 will be available in the 'tr' argument. This can be used to selectively
2077 will be available in the 'tr' argument. This can be used to selectively
2072 update caches relevant to the changes in that transaction.
2078 update caches relevant to the changes in that transaction.
2073
2079
2074 If 'full' is set, make sure all caches the function knows about have
2080 If 'full' is set, make sure all caches the function knows about have
2075 up-to-date data. Even the ones usually loaded more lazily.
2081 up-to-date data. Even the ones usually loaded more lazily.
2076 """
2082 """
2077 if tr is not None and tr.hookargs.get('source') == 'strip':
2083 if tr is not None and tr.hookargs.get('source') == 'strip':
2078 # During strip, many caches are invalid but
2084 # During strip, many caches are invalid but
2079 # later call to `destroyed` will refresh them.
2085 # later call to `destroyed` will refresh them.
2080 return
2086 return
2081
2087
2082 if tr is None or tr.changes['origrepolen'] < len(self):
2088 if tr is None or tr.changes['origrepolen'] < len(self):
2083 # accessing the 'ser ved' branchmap should refresh all the others,
2089 # accessing the 'ser ved' branchmap should refresh all the others,
2084 self.ui.debug('updating the branch cache\n')
2090 self.ui.debug('updating the branch cache\n')
2085 self.filtered('served').branchmap()
2091 self.filtered('served').branchmap()
2086
2092
2087 if full:
2093 if full:
2088 unfi = self.unfiltered()
2094 unfi = self.unfiltered()
2089 rbc = unfi.revbranchcache()
2095 rbc = unfi.revbranchcache()
2090 for r in unfi.changelog:
2096 for r in unfi.changelog:
2091 rbc.branchinfo(r)
2097 rbc.branchinfo(r)
2092 rbc.write()
2098 rbc.write()
2093
2099
2094 # ensure the working copy parents are in the manifestfulltextcache
2100 # ensure the working copy parents are in the manifestfulltextcache
2095 for ctx in self['.'].parents():
2101 for ctx in self['.'].parents():
2096 ctx.manifest() # accessing the manifest is enough
2102 ctx.manifest() # accessing the manifest is enough
2097
2103
2098 # accessing tags warm the cache
2104 # accessing tags warm the cache
2099 self.tags()
2105 self.tags()
2100 self.filtered('served').tags()
2106 self.filtered('served').tags()
2101
2107
2102 def invalidatecaches(self):
2108 def invalidatecaches(self):
2103
2109
2104 if r'_tagscache' in vars(self):
2110 if r'_tagscache' in vars(self):
2105 # can't use delattr on proxy
2111 # can't use delattr on proxy
2106 del self.__dict__[r'_tagscache']
2112 del self.__dict__[r'_tagscache']
2107
2113
2108 self._branchcaches.clear()
2114 self._branchcaches.clear()
2109 self.invalidatevolatilesets()
2115 self.invalidatevolatilesets()
2110 self._sparsesignaturecache.clear()
2116 self._sparsesignaturecache.clear()
2111
2117
2112 def invalidatevolatilesets(self):
2118 def invalidatevolatilesets(self):
2113 self.filteredrevcache.clear()
2119 self.filteredrevcache.clear()
2114 obsolete.clearobscaches(self)
2120 obsolete.clearobscaches(self)
2115
2121
2116 def invalidatedirstate(self):
2122 def invalidatedirstate(self):
2117 '''Invalidates the dirstate, causing the next call to dirstate
2123 '''Invalidates the dirstate, causing the next call to dirstate
2118 to check if it was modified since the last time it was read,
2124 to check if it was modified since the last time it was read,
2119 rereading it if it has.
2125 rereading it if it has.
2120
2126
2121 This is different to dirstate.invalidate() that it doesn't always
2127 This is different to dirstate.invalidate() that it doesn't always
2122 rereads the dirstate. Use dirstate.invalidate() if you want to
2128 rereads the dirstate. Use dirstate.invalidate() if you want to
2123 explicitly read the dirstate again (i.e. restoring it to a previous
2129 explicitly read the dirstate again (i.e. restoring it to a previous
2124 known good state).'''
2130 known good state).'''
2125 if hasunfilteredcache(self, r'dirstate'):
2131 if hasunfilteredcache(self, r'dirstate'):
2126 for k in self.dirstate._filecache:
2132 for k in self.dirstate._filecache:
2127 try:
2133 try:
2128 delattr(self.dirstate, k)
2134 delattr(self.dirstate, k)
2129 except AttributeError:
2135 except AttributeError:
2130 pass
2136 pass
2131 delattr(self.unfiltered(), r'dirstate')
2137 delattr(self.unfiltered(), r'dirstate')
2132
2138
2133 def invalidate(self, clearfilecache=False):
2139 def invalidate(self, clearfilecache=False):
2134 '''Invalidates both store and non-store parts other than dirstate
2140 '''Invalidates both store and non-store parts other than dirstate
2135
2141
2136 If a transaction is running, invalidation of store is omitted,
2142 If a transaction is running, invalidation of store is omitted,
2137 because discarding in-memory changes might cause inconsistency
2143 because discarding in-memory changes might cause inconsistency
2138 (e.g. incomplete fncache causes unintentional failure, but
2144 (e.g. incomplete fncache causes unintentional failure, but
2139 redundant one doesn't).
2145 redundant one doesn't).
2140 '''
2146 '''
2141 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2147 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2142 for k in list(self._filecache.keys()):
2148 for k in list(self._filecache.keys()):
2143 # dirstate is invalidated separately in invalidatedirstate()
2149 # dirstate is invalidated separately in invalidatedirstate()
2144 if k == 'dirstate':
2150 if k == 'dirstate':
2145 continue
2151 continue
2146 if (k == 'changelog' and
2152 if (k == 'changelog' and
2147 self.currenttransaction() and
2153 self.currenttransaction() and
2148 self.changelog._delayed):
2154 self.changelog._delayed):
2149 # The changelog object may store unwritten revisions. We don't
2155 # The changelog object may store unwritten revisions. We don't
2150 # want to lose them.
2156 # want to lose them.
2151 # TODO: Solve the problem instead of working around it.
2157 # TODO: Solve the problem instead of working around it.
2152 continue
2158 continue
2153
2159
2154 if clearfilecache:
2160 if clearfilecache:
2155 del self._filecache[k]
2161 del self._filecache[k]
2156 try:
2162 try:
2157 delattr(unfiltered, k)
2163 delattr(unfiltered, k)
2158 except AttributeError:
2164 except AttributeError:
2159 pass
2165 pass
2160 self.invalidatecaches()
2166 self.invalidatecaches()
2161 if not self.currenttransaction():
2167 if not self.currenttransaction():
2162 # TODO: Changing contents of store outside transaction
2168 # TODO: Changing contents of store outside transaction
2163 # causes inconsistency. We should make in-memory store
2169 # causes inconsistency. We should make in-memory store
2164 # changes detectable, and abort if changed.
2170 # changes detectable, and abort if changed.
2165 self.store.invalidatecaches()
2171 self.store.invalidatecaches()
2166
2172
2167 def invalidateall(self):
2173 def invalidateall(self):
2168 '''Fully invalidates both store and non-store parts, causing the
2174 '''Fully invalidates both store and non-store parts, causing the
2169 subsequent operation to reread any outside changes.'''
2175 subsequent operation to reread any outside changes.'''
2170 # extension should hook this to invalidate its caches
2176 # extension should hook this to invalidate its caches
2171 self.invalidate()
2177 self.invalidate()
2172 self.invalidatedirstate()
2178 self.invalidatedirstate()
2173
2179
2174 @unfilteredmethod
2180 @unfilteredmethod
2175 def _refreshfilecachestats(self, tr):
2181 def _refreshfilecachestats(self, tr):
2176 """Reload stats of cached files so that they are flagged as valid"""
2182 """Reload stats of cached files so that they are flagged as valid"""
2177 for k, ce in self._filecache.items():
2183 for k, ce in self._filecache.items():
2178 k = pycompat.sysstr(k)
2184 k = pycompat.sysstr(k)
2179 if k == r'dirstate' or k not in self.__dict__:
2185 if k == r'dirstate' or k not in self.__dict__:
2180 continue
2186 continue
2181 ce.refresh()
2187 ce.refresh()
2182
2188
2183 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2189 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2184 inheritchecker=None, parentenvvar=None):
2190 inheritchecker=None, parentenvvar=None):
2185 parentlock = None
2191 parentlock = None
2186 # the contents of parentenvvar are used by the underlying lock to
2192 # the contents of parentenvvar are used by the underlying lock to
2187 # determine whether it can be inherited
2193 # determine whether it can be inherited
2188 if parentenvvar is not None:
2194 if parentenvvar is not None:
2189 parentlock = encoding.environ.get(parentenvvar)
2195 parentlock = encoding.environ.get(parentenvvar)
2190
2196
2191 timeout = 0
2197 timeout = 0
2192 warntimeout = 0
2198 warntimeout = 0
2193 if wait:
2199 if wait:
2194 timeout = self.ui.configint("ui", "timeout")
2200 timeout = self.ui.configint("ui", "timeout")
2195 warntimeout = self.ui.configint("ui", "timeout.warn")
2201 warntimeout = self.ui.configint("ui", "timeout.warn")
2196 # internal config: ui.signal-safe-lock
2202 # internal config: ui.signal-safe-lock
2197 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2203 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2198
2204
2199 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2205 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2200 releasefn=releasefn,
2206 releasefn=releasefn,
2201 acquirefn=acquirefn, desc=desc,
2207 acquirefn=acquirefn, desc=desc,
2202 inheritchecker=inheritchecker,
2208 inheritchecker=inheritchecker,
2203 parentlock=parentlock,
2209 parentlock=parentlock,
2204 signalsafe=signalsafe)
2210 signalsafe=signalsafe)
2205 return l
2211 return l
2206
2212
2207 def _afterlock(self, callback):
2213 def _afterlock(self, callback):
2208 """add a callback to be run when the repository is fully unlocked
2214 """add a callback to be run when the repository is fully unlocked
2209
2215
2210 The callback will be executed when the outermost lock is released
2216 The callback will be executed when the outermost lock is released
2211 (with wlock being higher level than 'lock')."""
2217 (with wlock being higher level than 'lock')."""
2212 for ref in (self._wlockref, self._lockref):
2218 for ref in (self._wlockref, self._lockref):
2213 l = ref and ref()
2219 l = ref and ref()
2214 if l and l.held:
2220 if l and l.held:
2215 l.postrelease.append(callback)
2221 l.postrelease.append(callback)
2216 break
2222 break
2217 else: # no lock have been found.
2223 else: # no lock have been found.
2218 callback()
2224 callback()
2219
2225
2220 def lock(self, wait=True):
2226 def lock(self, wait=True):
2221 '''Lock the repository store (.hg/store) and return a weak reference
2227 '''Lock the repository store (.hg/store) and return a weak reference
2222 to the lock. Use this before modifying the store (e.g. committing or
2228 to the lock. Use this before modifying the store (e.g. committing or
2223 stripping). If you are opening a transaction, get a lock as well.)
2229 stripping). If you are opening a transaction, get a lock as well.)
2224
2230
2225 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2231 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2226 'wlock' first to avoid a dead-lock hazard.'''
2232 'wlock' first to avoid a dead-lock hazard.'''
2227 l = self._currentlock(self._lockref)
2233 l = self._currentlock(self._lockref)
2228 if l is not None:
2234 if l is not None:
2229 l.lock()
2235 l.lock()
2230 return l
2236 return l
2231
2237
2232 l = self._lock(vfs=self.svfs,
2238 l = self._lock(vfs=self.svfs,
2233 lockname="lock",
2239 lockname="lock",
2234 wait=wait,
2240 wait=wait,
2235 releasefn=None,
2241 releasefn=None,
2236 acquirefn=self.invalidate,
2242 acquirefn=self.invalidate,
2237 desc=_('repository %s') % self.origroot)
2243 desc=_('repository %s') % self.origroot)
2238 self._lockref = weakref.ref(l)
2244 self._lockref = weakref.ref(l)
2239 return l
2245 return l
2240
2246
2241 def _wlockchecktransaction(self):
2247 def _wlockchecktransaction(self):
2242 if self.currenttransaction() is not None:
2248 if self.currenttransaction() is not None:
2243 raise error.LockInheritanceContractViolation(
2249 raise error.LockInheritanceContractViolation(
2244 'wlock cannot be inherited in the middle of a transaction')
2250 'wlock cannot be inherited in the middle of a transaction')
2245
2251
2246 def wlock(self, wait=True):
2252 def wlock(self, wait=True):
2247 '''Lock the non-store parts of the repository (everything under
2253 '''Lock the non-store parts of the repository (everything under
2248 .hg except .hg/store) and return a weak reference to the lock.
2254 .hg except .hg/store) and return a weak reference to the lock.
2249
2255
2250 Use this before modifying files in .hg.
2256 Use this before modifying files in .hg.
2251
2257
2252 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2258 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2253 'wlock' first to avoid a dead-lock hazard.'''
2259 'wlock' first to avoid a dead-lock hazard.'''
2254 l = self._wlockref and self._wlockref()
2260 l = self._wlockref and self._wlockref()
2255 if l is not None and l.held:
2261 if l is not None and l.held:
2256 l.lock()
2262 l.lock()
2257 return l
2263 return l
2258
2264
2259 # We do not need to check for non-waiting lock acquisition. Such
2265 # We do not need to check for non-waiting lock acquisition. Such
2260 # acquisition would not cause dead-lock as they would just fail.
2266 # acquisition would not cause dead-lock as they would just fail.
2261 if wait and (self.ui.configbool('devel', 'all-warnings')
2267 if wait and (self.ui.configbool('devel', 'all-warnings')
2262 or self.ui.configbool('devel', 'check-locks')):
2268 or self.ui.configbool('devel', 'check-locks')):
2263 if self._currentlock(self._lockref) is not None:
2269 if self._currentlock(self._lockref) is not None:
2264 self.ui.develwarn('"wlock" acquired after "lock"')
2270 self.ui.develwarn('"wlock" acquired after "lock"')
2265
2271
2266 def unlock():
2272 def unlock():
2267 if self.dirstate.pendingparentchange():
2273 if self.dirstate.pendingparentchange():
2268 self.dirstate.invalidate()
2274 self.dirstate.invalidate()
2269 else:
2275 else:
2270 self.dirstate.write(None)
2276 self.dirstate.write(None)
2271
2277
2272 self._filecache['dirstate'].refresh()
2278 self._filecache['dirstate'].refresh()
2273
2279
2274 l = self._lock(self.vfs, "wlock", wait, unlock,
2280 l = self._lock(self.vfs, "wlock", wait, unlock,
2275 self.invalidatedirstate, _('working directory of %s') %
2281 self.invalidatedirstate, _('working directory of %s') %
2276 self.origroot,
2282 self.origroot,
2277 inheritchecker=self._wlockchecktransaction,
2283 inheritchecker=self._wlockchecktransaction,
2278 parentenvvar='HG_WLOCK_LOCKER')
2284 parentenvvar='HG_WLOCK_LOCKER')
2279 self._wlockref = weakref.ref(l)
2285 self._wlockref = weakref.ref(l)
2280 return l
2286 return l
2281
2287
2282 def _currentlock(self, lockref):
2288 def _currentlock(self, lockref):
2283 """Returns the lock if it's held, or None if it's not."""
2289 """Returns the lock if it's held, or None if it's not."""
2284 if lockref is None:
2290 if lockref is None:
2285 return None
2291 return None
2286 l = lockref()
2292 l = lockref()
2287 if l is None or not l.held:
2293 if l is None or not l.held:
2288 return None
2294 return None
2289 return l
2295 return l
2290
2296
2291 def currentwlock(self):
2297 def currentwlock(self):
2292 """Returns the wlock if it's held, or None if it's not."""
2298 """Returns the wlock if it's held, or None if it's not."""
2293 return self._currentlock(self._wlockref)
2299 return self._currentlock(self._wlockref)
2294
2300
2295 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
2301 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
2296 """
2302 """
2297 commit an individual file as part of a larger transaction
2303 commit an individual file as part of a larger transaction
2298 """
2304 """
2299
2305
2300 fname = fctx.path()
2306 fname = fctx.path()
2301 fparent1 = manifest1.get(fname, nullid)
2307 fparent1 = manifest1.get(fname, nullid)
2302 fparent2 = manifest2.get(fname, nullid)
2308 fparent2 = manifest2.get(fname, nullid)
2303 if isinstance(fctx, context.filectx):
2309 if isinstance(fctx, context.filectx):
2304 node = fctx.filenode()
2310 node = fctx.filenode()
2305 if node in [fparent1, fparent2]:
2311 if node in [fparent1, fparent2]:
2306 self.ui.debug('reusing %s filelog entry\n' % fname)
2312 self.ui.debug('reusing %s filelog entry\n' % fname)
2307 if manifest1.flags(fname) != fctx.flags():
2313 if manifest1.flags(fname) != fctx.flags():
2308 changelist.append(fname)
2314 changelist.append(fname)
2309 return node
2315 return node
2310
2316
2311 flog = self.file(fname)
2317 flog = self.file(fname)
2312 meta = {}
2318 meta = {}
2313 cfname = fctx.copysource()
2319 cfname = fctx.copysource()
2314 if cfname and cfname != fname:
2320 if cfname and cfname != fname:
2315 # Mark the new revision of this file as a copy of another
2321 # Mark the new revision of this file as a copy of another
2316 # file. This copy data will effectively act as a parent
2322 # file. This copy data will effectively act as a parent
2317 # of this new revision. If this is a merge, the first
2323 # of this new revision. If this is a merge, the first
2318 # parent will be the nullid (meaning "look up the copy data")
2324 # parent will be the nullid (meaning "look up the copy data")
2319 # and the second one will be the other parent. For example:
2325 # and the second one will be the other parent. For example:
2320 #
2326 #
2321 # 0 --- 1 --- 3 rev1 changes file foo
2327 # 0 --- 1 --- 3 rev1 changes file foo
2322 # \ / rev2 renames foo to bar and changes it
2328 # \ / rev2 renames foo to bar and changes it
2323 # \- 2 -/ rev3 should have bar with all changes and
2329 # \- 2 -/ rev3 should have bar with all changes and
2324 # should record that bar descends from
2330 # should record that bar descends from
2325 # bar in rev2 and foo in rev1
2331 # bar in rev2 and foo in rev1
2326 #
2332 #
2327 # this allows this merge to succeed:
2333 # this allows this merge to succeed:
2328 #
2334 #
2329 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2335 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2330 # \ / merging rev3 and rev4 should use bar@rev2
2336 # \ / merging rev3 and rev4 should use bar@rev2
2331 # \- 2 --- 4 as the merge base
2337 # \- 2 --- 4 as the merge base
2332 #
2338 #
2333
2339
2334 crev = manifest1.get(cfname)
2340 crev = manifest1.get(cfname)
2335 newfparent = fparent2
2341 newfparent = fparent2
2336
2342
2337 if manifest2: # branch merge
2343 if manifest2: # branch merge
2338 if fparent2 == nullid or crev is None: # copied on remote side
2344 if fparent2 == nullid or crev is None: # copied on remote side
2339 if cfname in manifest2:
2345 if cfname in manifest2:
2340 crev = manifest2[cfname]
2346 crev = manifest2[cfname]
2341 newfparent = fparent1
2347 newfparent = fparent1
2342
2348
2343 # Here, we used to search backwards through history to try to find
2349 # Here, we used to search backwards through history to try to find
2344 # where the file copy came from if the source of a copy was not in
2350 # where the file copy came from if the source of a copy was not in
2345 # the parent directory. However, this doesn't actually make sense to
2351 # the parent directory. However, this doesn't actually make sense to
2346 # do (what does a copy from something not in your working copy even
2352 # do (what does a copy from something not in your working copy even
2347 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2353 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2348 # the user that copy information was dropped, so if they didn't
2354 # the user that copy information was dropped, so if they didn't
2349 # expect this outcome it can be fixed, but this is the correct
2355 # expect this outcome it can be fixed, but this is the correct
2350 # behavior in this circumstance.
2356 # behavior in this circumstance.
2351
2357
2352 if crev:
2358 if crev:
2353 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
2359 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
2354 meta["copy"] = cfname
2360 meta["copy"] = cfname
2355 meta["copyrev"] = hex(crev)
2361 meta["copyrev"] = hex(crev)
2356 fparent1, fparent2 = nullid, newfparent
2362 fparent1, fparent2 = nullid, newfparent
2357 else:
2363 else:
2358 self.ui.warn(_("warning: can't find ancestor for '%s' "
2364 self.ui.warn(_("warning: can't find ancestor for '%s' "
2359 "copied from '%s'!\n") % (fname, cfname))
2365 "copied from '%s'!\n") % (fname, cfname))
2360
2366
2361 elif fparent1 == nullid:
2367 elif fparent1 == nullid:
2362 fparent1, fparent2 = fparent2, nullid
2368 fparent1, fparent2 = fparent2, nullid
2363 elif fparent2 != nullid:
2369 elif fparent2 != nullid:
2364 # is one parent an ancestor of the other?
2370 # is one parent an ancestor of the other?
2365 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2371 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2366 if fparent1 in fparentancestors:
2372 if fparent1 in fparentancestors:
2367 fparent1, fparent2 = fparent2, nullid
2373 fparent1, fparent2 = fparent2, nullid
2368 elif fparent2 in fparentancestors:
2374 elif fparent2 in fparentancestors:
2369 fparent2 = nullid
2375 fparent2 = nullid
2370
2376
2371 # is the file changed?
2377 # is the file changed?
2372 text = fctx.data()
2378 text = fctx.data()
2373 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2379 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2374 changelist.append(fname)
2380 changelist.append(fname)
2375 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2381 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2376 # are just the flags changed during merge?
2382 # are just the flags changed during merge?
2377 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2383 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2378 changelist.append(fname)
2384 changelist.append(fname)
2379
2385
2380 return fparent1
2386 return fparent1
2381
2387
2382 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2388 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2383 """check for commit arguments that aren't committable"""
2389 """check for commit arguments that aren't committable"""
2384 if match.isexact() or match.prefix():
2390 if match.isexact() or match.prefix():
2385 matched = set(status.modified + status.added + status.removed)
2391 matched = set(status.modified + status.added + status.removed)
2386
2392
2387 for f in match.files():
2393 for f in match.files():
2388 f = self.dirstate.normalize(f)
2394 f = self.dirstate.normalize(f)
2389 if f == '.' or f in matched or f in wctx.substate:
2395 if f == '.' or f in matched or f in wctx.substate:
2390 continue
2396 continue
2391 if f in status.deleted:
2397 if f in status.deleted:
2392 fail(f, _('file not found!'))
2398 fail(f, _('file not found!'))
2393 if f in vdirs: # visited directory
2399 if f in vdirs: # visited directory
2394 d = f + '/'
2400 d = f + '/'
2395 for mf in matched:
2401 for mf in matched:
2396 if mf.startswith(d):
2402 if mf.startswith(d):
2397 break
2403 break
2398 else:
2404 else:
2399 fail(f, _("no match under directory!"))
2405 fail(f, _("no match under directory!"))
2400 elif f not in self.dirstate:
2406 elif f not in self.dirstate:
2401 fail(f, _("file not tracked!"))
2407 fail(f, _("file not tracked!"))
2402
2408
2403 @unfilteredmethod
2409 @unfilteredmethod
2404 def commit(self, text="", user=None, date=None, match=None, force=False,
2410 def commit(self, text="", user=None, date=None, match=None, force=False,
2405 editor=False, extra=None):
2411 editor=False, extra=None):
2406 """Add a new revision to current repository.
2412 """Add a new revision to current repository.
2407
2413
2408 Revision information is gathered from the working directory,
2414 Revision information is gathered from the working directory,
2409 match can be used to filter the committed files. If editor is
2415 match can be used to filter the committed files. If editor is
2410 supplied, it is called to get a commit message.
2416 supplied, it is called to get a commit message.
2411 """
2417 """
2412 if extra is None:
2418 if extra is None:
2413 extra = {}
2419 extra = {}
2414
2420
2415 def fail(f, msg):
2421 def fail(f, msg):
2416 raise error.Abort('%s: %s' % (f, msg))
2422 raise error.Abort('%s: %s' % (f, msg))
2417
2423
2418 if not match:
2424 if not match:
2419 match = matchmod.always()
2425 match = matchmod.always()
2420
2426
2421 if not force:
2427 if not force:
2422 vdirs = []
2428 vdirs = []
2423 match.explicitdir = vdirs.append
2429 match.explicitdir = vdirs.append
2424 match.bad = fail
2430 match.bad = fail
2425
2431
2426 # lock() for recent changelog (see issue4368)
2432 # lock() for recent changelog (see issue4368)
2427 with self.wlock(), self.lock():
2433 with self.wlock(), self.lock():
2428 wctx = self[None]
2434 wctx = self[None]
2429 merge = len(wctx.parents()) > 1
2435 merge = len(wctx.parents()) > 1
2430
2436
2431 if not force and merge and not match.always():
2437 if not force and merge and not match.always():
2432 raise error.Abort(_('cannot partially commit a merge '
2438 raise error.Abort(_('cannot partially commit a merge '
2433 '(do not specify files or patterns)'))
2439 '(do not specify files or patterns)'))
2434
2440
2435 status = self.status(match=match, clean=force)
2441 status = self.status(match=match, clean=force)
2436 if force:
2442 if force:
2437 status.modified.extend(status.clean) # mq may commit clean files
2443 status.modified.extend(status.clean) # mq may commit clean files
2438
2444
2439 # check subrepos
2445 # check subrepos
2440 subs, commitsubs, newstate = subrepoutil.precommit(
2446 subs, commitsubs, newstate = subrepoutil.precommit(
2441 self.ui, wctx, status, match, force=force)
2447 self.ui, wctx, status, match, force=force)
2442
2448
2443 # make sure all explicit patterns are matched
2449 # make sure all explicit patterns are matched
2444 if not force:
2450 if not force:
2445 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2451 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2446
2452
2447 cctx = context.workingcommitctx(self, status,
2453 cctx = context.workingcommitctx(self, status,
2448 text, user, date, extra)
2454 text, user, date, extra)
2449
2455
2450 # internal config: ui.allowemptycommit
2456 # internal config: ui.allowemptycommit
2451 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2457 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2452 or extra.get('close') or merge or cctx.files()
2458 or extra.get('close') or merge or cctx.files()
2453 or self.ui.configbool('ui', 'allowemptycommit'))
2459 or self.ui.configbool('ui', 'allowemptycommit'))
2454 if not allowemptycommit:
2460 if not allowemptycommit:
2455 return None
2461 return None
2456
2462
2457 if merge and cctx.deleted():
2463 if merge and cctx.deleted():
2458 raise error.Abort(_("cannot commit merge with missing files"))
2464 raise error.Abort(_("cannot commit merge with missing files"))
2459
2465
2460 ms = mergemod.mergestate.read(self)
2466 ms = mergemod.mergestate.read(self)
2461 mergeutil.checkunresolved(ms)
2467 mergeutil.checkunresolved(ms)
2462
2468
2463 if editor:
2469 if editor:
2464 cctx._text = editor(self, cctx, subs)
2470 cctx._text = editor(self, cctx, subs)
2465 edited = (text != cctx._text)
2471 edited = (text != cctx._text)
2466
2472
2467 # Save commit message in case this transaction gets rolled back
2473 # Save commit message in case this transaction gets rolled back
2468 # (e.g. by a pretxncommit hook). Leave the content alone on
2474 # (e.g. by a pretxncommit hook). Leave the content alone on
2469 # the assumption that the user will use the same editor again.
2475 # the assumption that the user will use the same editor again.
2470 msgfn = self.savecommitmessage(cctx._text)
2476 msgfn = self.savecommitmessage(cctx._text)
2471
2477
2472 # commit subs and write new state
2478 # commit subs and write new state
2473 if subs:
2479 if subs:
2474 uipathfn = scmutil.getuipathfn(self)
2480 uipathfn = scmutil.getuipathfn(self)
2475 for s in sorted(commitsubs):
2481 for s in sorted(commitsubs):
2476 sub = wctx.sub(s)
2482 sub = wctx.sub(s)
2477 self.ui.status(_('committing subrepository %s\n') %
2483 self.ui.status(_('committing subrepository %s\n') %
2478 uipathfn(subrepoutil.subrelpath(sub)))
2484 uipathfn(subrepoutil.subrelpath(sub)))
2479 sr = sub.commit(cctx._text, user, date)
2485 sr = sub.commit(cctx._text, user, date)
2480 newstate[s] = (newstate[s][0], sr)
2486 newstate[s] = (newstate[s][0], sr)
2481 subrepoutil.writestate(self, newstate)
2487 subrepoutil.writestate(self, newstate)
2482
2488
2483 p1, p2 = self.dirstate.parents()
2489 p1, p2 = self.dirstate.parents()
2484 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2490 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2485 try:
2491 try:
2486 self.hook("precommit", throw=True, parent1=hookp1,
2492 self.hook("precommit", throw=True, parent1=hookp1,
2487 parent2=hookp2)
2493 parent2=hookp2)
2488 with self.transaction('commit'):
2494 with self.transaction('commit'):
2489 ret = self.commitctx(cctx, True)
2495 ret = self.commitctx(cctx, True)
2490 # update bookmarks, dirstate and mergestate
2496 # update bookmarks, dirstate and mergestate
2491 bookmarks.update(self, [p1, p2], ret)
2497 bookmarks.update(self, [p1, p2], ret)
2492 cctx.markcommitted(ret)
2498 cctx.markcommitted(ret)
2493 ms.reset()
2499 ms.reset()
2494 except: # re-raises
2500 except: # re-raises
2495 if edited:
2501 if edited:
2496 self.ui.write(
2502 self.ui.write(
2497 _('note: commit message saved in %s\n') % msgfn)
2503 _('note: commit message saved in %s\n') % msgfn)
2498 raise
2504 raise
2499
2505
2500 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2506 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2501 # hack for command that use a temporary commit (eg: histedit)
2507 # hack for command that use a temporary commit (eg: histedit)
2502 # temporary commit got stripped before hook release
2508 # temporary commit got stripped before hook release
2503 if self.changelog.hasnode(ret):
2509 if self.changelog.hasnode(ret):
2504 self.hook("commit", node=node, parent1=parent1,
2510 self.hook("commit", node=node, parent1=parent1,
2505 parent2=parent2)
2511 parent2=parent2)
2506 self._afterlock(commithook)
2512 self._afterlock(commithook)
2507 return ret
2513 return ret
2508
2514
2509 @unfilteredmethod
2515 @unfilteredmethod
2510 def commitctx(self, ctx, error=False):
2516 def commitctx(self, ctx, error=False):
2511 """Add a new revision to current repository.
2517 """Add a new revision to current repository.
2512 Revision information is passed via the context argument.
2518 Revision information is passed via the context argument.
2513
2519
2514 ctx.files() should list all files involved in this commit, i.e.
2520 ctx.files() should list all files involved in this commit, i.e.
2515 modified/added/removed files. On merge, it may be wider than the
2521 modified/added/removed files. On merge, it may be wider than the
2516 ctx.files() to be committed, since any file nodes derived directly
2522 ctx.files() to be committed, since any file nodes derived directly
2517 from p1 or p2 are excluded from the committed ctx.files().
2523 from p1 or p2 are excluded from the committed ctx.files().
2518 """
2524 """
2519
2525
2520 p1, p2 = ctx.p1(), ctx.p2()
2526 p1, p2 = ctx.p1(), ctx.p2()
2521 user = ctx.user()
2527 user = ctx.user()
2522
2528
2523 with self.lock(), self.transaction("commit") as tr:
2529 with self.lock(), self.transaction("commit") as tr:
2524 trp = weakref.proxy(tr)
2530 trp = weakref.proxy(tr)
2525
2531
2526 if ctx.manifestnode():
2532 if ctx.manifestnode():
2527 # reuse an existing manifest revision
2533 # reuse an existing manifest revision
2528 self.ui.debug('reusing known manifest\n')
2534 self.ui.debug('reusing known manifest\n')
2529 mn = ctx.manifestnode()
2535 mn = ctx.manifestnode()
2530 files = ctx.files()
2536 files = ctx.files()
2531 elif ctx.files():
2537 elif ctx.files():
2532 m1ctx = p1.manifestctx()
2538 m1ctx = p1.manifestctx()
2533 m2ctx = p2.manifestctx()
2539 m2ctx = p2.manifestctx()
2534 mctx = m1ctx.copy()
2540 mctx = m1ctx.copy()
2535
2541
2536 m = mctx.read()
2542 m = mctx.read()
2537 m1 = m1ctx.read()
2543 m1 = m1ctx.read()
2538 m2 = m2ctx.read()
2544 m2 = m2ctx.read()
2539
2545
2540 # check in files
2546 # check in files
2541 added = []
2547 added = []
2542 changed = []
2548 changed = []
2543 removed = list(ctx.removed())
2549 removed = list(ctx.removed())
2544 linkrev = len(self)
2550 linkrev = len(self)
2545 self.ui.note(_("committing files:\n"))
2551 self.ui.note(_("committing files:\n"))
2546 uipathfn = scmutil.getuipathfn(self)
2552 uipathfn = scmutil.getuipathfn(self)
2547 for f in sorted(ctx.modified() + ctx.added()):
2553 for f in sorted(ctx.modified() + ctx.added()):
2548 self.ui.note(uipathfn(f) + "\n")
2554 self.ui.note(uipathfn(f) + "\n")
2549 try:
2555 try:
2550 fctx = ctx[f]
2556 fctx = ctx[f]
2551 if fctx is None:
2557 if fctx is None:
2552 removed.append(f)
2558 removed.append(f)
2553 else:
2559 else:
2554 added.append(f)
2560 added.append(f)
2555 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2561 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2556 trp, changed)
2562 trp, changed)
2557 m.setflag(f, fctx.flags())
2563 m.setflag(f, fctx.flags())
2558 except OSError:
2564 except OSError:
2559 self.ui.warn(_("trouble committing %s!\n") %
2565 self.ui.warn(_("trouble committing %s!\n") %
2560 uipathfn(f))
2566 uipathfn(f))
2561 raise
2567 raise
2562 except IOError as inst:
2568 except IOError as inst:
2563 errcode = getattr(inst, 'errno', errno.ENOENT)
2569 errcode = getattr(inst, 'errno', errno.ENOENT)
2564 if error or errcode and errcode != errno.ENOENT:
2570 if error or errcode and errcode != errno.ENOENT:
2565 self.ui.warn(_("trouble committing %s!\n") %
2571 self.ui.warn(_("trouble committing %s!\n") %
2566 uipathfn(f))
2572 uipathfn(f))
2567 raise
2573 raise
2568
2574
2569 # update manifest
2575 # update manifest
2570 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2576 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2571 drop = [f for f in removed if f in m]
2577 drop = [f for f in removed if f in m]
2572 for f in drop:
2578 for f in drop:
2573 del m[f]
2579 del m[f]
2574 files = changed + removed
2580 files = changed + removed
2575 md = None
2581 md = None
2576 if not files:
2582 if not files:
2577 # if no "files" actually changed in terms of the changelog,
2583 # if no "files" actually changed in terms of the changelog,
2578 # try hard to detect unmodified manifest entry so that the
2584 # try hard to detect unmodified manifest entry so that the
2579 # exact same commit can be reproduced later on convert.
2585 # exact same commit can be reproduced later on convert.
2580 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2586 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2581 if not files and md:
2587 if not files and md:
2582 self.ui.debug('not reusing manifest (no file change in '
2588 self.ui.debug('not reusing manifest (no file change in '
2583 'changelog, but manifest differs)\n')
2589 'changelog, but manifest differs)\n')
2584 if files or md:
2590 if files or md:
2585 self.ui.note(_("committing manifest\n"))
2591 self.ui.note(_("committing manifest\n"))
2586 # we're using narrowmatch here since it's already applied at
2592 # we're using narrowmatch here since it's already applied at
2587 # other stages (such as dirstate.walk), so we're already
2593 # other stages (such as dirstate.walk), so we're already
2588 # ignoring things outside of narrowspec in most cases. The
2594 # ignoring things outside of narrowspec in most cases. The
2589 # one case where we might have files outside the narrowspec
2595 # one case where we might have files outside the narrowspec
2590 # at this point is merges, and we already error out in the
2596 # at this point is merges, and we already error out in the
2591 # case where the merge has files outside of the narrowspec,
2597 # case where the merge has files outside of the narrowspec,
2592 # so this is safe.
2598 # so this is safe.
2593 mn = mctx.write(trp, linkrev,
2599 mn = mctx.write(trp, linkrev,
2594 p1.manifestnode(), p2.manifestnode(),
2600 p1.manifestnode(), p2.manifestnode(),
2595 added, drop, match=self.narrowmatch())
2601 added, drop, match=self.narrowmatch())
2596 else:
2602 else:
2597 self.ui.debug('reusing manifest form p1 (listed files '
2603 self.ui.debug('reusing manifest form p1 (listed files '
2598 'actually unchanged)\n')
2604 'actually unchanged)\n')
2599 mn = p1.manifestnode()
2605 mn = p1.manifestnode()
2600 else:
2606 else:
2601 self.ui.debug('reusing manifest from p1 (no file change)\n')
2607 self.ui.debug('reusing manifest from p1 (no file change)\n')
2602 mn = p1.manifestnode()
2608 mn = p1.manifestnode()
2603 files = []
2609 files = []
2604
2610
2605 # update changelog
2611 # update changelog
2606 self.ui.note(_("committing changelog\n"))
2612 self.ui.note(_("committing changelog\n"))
2607 self.changelog.delayupdate(tr)
2613 self.changelog.delayupdate(tr)
2608 n = self.changelog.add(mn, files, ctx.description(),
2614 n = self.changelog.add(mn, files, ctx.description(),
2609 trp, p1.node(), p2.node(),
2615 trp, p1.node(), p2.node(),
2610 user, ctx.date(), ctx.extra().copy())
2616 user, ctx.date(), ctx.extra().copy())
2611 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2617 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2612 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2618 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2613 parent2=xp2)
2619 parent2=xp2)
2614 # set the new commit is proper phase
2620 # set the new commit is proper phase
2615 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2621 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2616 if targetphase:
2622 if targetphase:
2617 # retract boundary do not alter parent changeset.
2623 # retract boundary do not alter parent changeset.
2618 # if a parent have higher the resulting phase will
2624 # if a parent have higher the resulting phase will
2619 # be compliant anyway
2625 # be compliant anyway
2620 #
2626 #
2621 # if minimal phase was 0 we don't need to retract anything
2627 # if minimal phase was 0 we don't need to retract anything
2622 phases.registernew(self, tr, targetphase, [n])
2628 phases.registernew(self, tr, targetphase, [n])
2623 return n
2629 return n
2624
2630
2625 @unfilteredmethod
2631 @unfilteredmethod
2626 def destroying(self):
2632 def destroying(self):
2627 '''Inform the repository that nodes are about to be destroyed.
2633 '''Inform the repository that nodes are about to be destroyed.
2628 Intended for use by strip and rollback, so there's a common
2634 Intended for use by strip and rollback, so there's a common
2629 place for anything that has to be done before destroying history.
2635 place for anything that has to be done before destroying history.
2630
2636
2631 This is mostly useful for saving state that is in memory and waiting
2637 This is mostly useful for saving state that is in memory and waiting
2632 to be flushed when the current lock is released. Because a call to
2638 to be flushed when the current lock is released. Because a call to
2633 destroyed is imminent, the repo will be invalidated causing those
2639 destroyed is imminent, the repo will be invalidated causing those
2634 changes to stay in memory (waiting for the next unlock), or vanish
2640 changes to stay in memory (waiting for the next unlock), or vanish
2635 completely.
2641 completely.
2636 '''
2642 '''
2637 # When using the same lock to commit and strip, the phasecache is left
2643 # When using the same lock to commit and strip, the phasecache is left
2638 # dirty after committing. Then when we strip, the repo is invalidated,
2644 # dirty after committing. Then when we strip, the repo is invalidated,
2639 # causing those changes to disappear.
2645 # causing those changes to disappear.
2640 if '_phasecache' in vars(self):
2646 if '_phasecache' in vars(self):
2641 self._phasecache.write()
2647 self._phasecache.write()
2642
2648
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
2674
2680
2675 def status(self, node1='.', node2=None, match=None,
2681 def status(self, node1='.', node2=None, match=None,
2676 ignored=False, clean=False, unknown=False,
2682 ignored=False, clean=False, unknown=False,
2677 listsubrepos=False):
2683 listsubrepos=False):
2678 '''a convenience method that calls node1.status(node2)'''
2684 '''a convenience method that calls node1.status(node2)'''
2679 return self[node1].status(node2, match, ignored, clean, unknown,
2685 return self[node1].status(node2, match, ignored, clean, unknown,
2680 listsubrepos)
2686 listsubrepos)
2681
2687
2682 def addpostdsstatus(self, ps):
2688 def addpostdsstatus(self, ps):
2683 """Add a callback to run within the wlock, at the point at which status
2689 """Add a callback to run within the wlock, at the point at which status
2684 fixups happen.
2690 fixups happen.
2685
2691
2686 On status completion, callback(wctx, status) will be called with the
2692 On status completion, callback(wctx, status) will be called with the
2687 wlock held, unless the dirstate has changed from underneath or the wlock
2693 wlock held, unless the dirstate has changed from underneath or the wlock
2688 couldn't be grabbed.
2694 couldn't be grabbed.
2689
2695
2690 Callbacks should not capture and use a cached copy of the dirstate --
2696 Callbacks should not capture and use a cached copy of the dirstate --
2691 it might change in the meanwhile. Instead, they should access the
2697 it might change in the meanwhile. Instead, they should access the
2692 dirstate via wctx.repo().dirstate.
2698 dirstate via wctx.repo().dirstate.
2693
2699
2694 This list is emptied out after each status run -- extensions should
2700 This list is emptied out after each status run -- extensions should
2695 make sure it adds to this list each time dirstate.status is called.
2701 make sure it adds to this list each time dirstate.status is called.
2696 Extensions should also make sure they don't call this for statuses
2702 Extensions should also make sure they don't call this for statuses
2697 that don't involve the dirstate.
2703 that don't involve the dirstate.
2698 """
2704 """
2699
2705
2700 # The list is located here for uniqueness reasons -- it is actually
2706 # The list is located here for uniqueness reasons -- it is actually
2701 # managed by the workingctx, but that isn't unique per-repo.
2707 # managed by the workingctx, but that isn't unique per-repo.
2702 self._postdsstatus.append(ps)
2708 self._postdsstatus.append(ps)
2703
2709
2704 def postdsstatus(self):
2710 def postdsstatus(self):
2705 """Used by workingctx to get the list of post-dirstate-status hooks."""
2711 """Used by workingctx to get the list of post-dirstate-status hooks."""
2706 return self._postdsstatus
2712 return self._postdsstatus
2707
2713
2708 def clearpostdsstatus(self):
2714 def clearpostdsstatus(self):
2709 """Used by workingctx to clear post-dirstate-status hooks."""
2715 """Used by workingctx to clear post-dirstate-status hooks."""
2710 del self._postdsstatus[:]
2716 del self._postdsstatus[:]
2711
2717
2712 def heads(self, start=None):
2718 def heads(self, start=None):
2713 if start is None:
2719 if start is None:
2714 cl = self.changelog
2720 cl = self.changelog
2715 headrevs = reversed(cl.headrevs())
2721 headrevs = reversed(cl.headrevs())
2716 return [cl.node(rev) for rev in headrevs]
2722 return [cl.node(rev) for rev in headrevs]
2717
2723
2718 heads = self.changelog.heads(start)
2724 heads = self.changelog.heads(start)
2719 # sort the output in rev descending order
2725 # sort the output in rev descending order
2720 return sorted(heads, key=self.changelog.rev, reverse=True)
2726 return sorted(heads, key=self.changelog.rev, reverse=True)
2721
2727
2722 def branchheads(self, branch=None, start=None, closed=False):
2728 def branchheads(self, branch=None, start=None, closed=False):
2723 '''return a (possibly filtered) list of heads for the given branch
2729 '''return a (possibly filtered) list of heads for the given branch
2724
2730
2725 Heads are returned in topological order, from newest to oldest.
2731 Heads are returned in topological order, from newest to oldest.
2726 If branch is None, use the dirstate branch.
2732 If branch is None, use the dirstate branch.
2727 If start is not None, return only heads reachable from start.
2733 If start is not None, return only heads reachable from start.
2728 If closed is True, return heads that are marked as closed as well.
2734 If closed is True, return heads that are marked as closed as well.
2729 '''
2735 '''
2730 if branch is None:
2736 if branch is None:
2731 branch = self[None].branch()
2737 branch = self[None].branch()
2732 branches = self.branchmap()
2738 branches = self.branchmap()
2733 if not branches.hasbranch(branch):
2739 if not branches.hasbranch(branch):
2734 return []
2740 return []
2735 # the cache returns heads ordered lowest to highest
2741 # the cache returns heads ordered lowest to highest
2736 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2742 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2737 if start is not None:
2743 if start is not None:
2738 # filter out the heads that cannot be reached from startrev
2744 # filter out the heads that cannot be reached from startrev
2739 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2745 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2740 bheads = [h for h in bheads if h in fbheads]
2746 bheads = [h for h in bheads if h in fbheads]
2741 return bheads
2747 return bheads
2742
2748
2743 def branches(self, nodes):
2749 def branches(self, nodes):
2744 if not nodes:
2750 if not nodes:
2745 nodes = [self.changelog.tip()]
2751 nodes = [self.changelog.tip()]
2746 b = []
2752 b = []
2747 for n in nodes:
2753 for n in nodes:
2748 t = n
2754 t = n
2749 while True:
2755 while True:
2750 p = self.changelog.parents(n)
2756 p = self.changelog.parents(n)
2751 if p[1] != nullid or p[0] == nullid:
2757 if p[1] != nullid or p[0] == nullid:
2752 b.append((t, n, p[0], p[1]))
2758 b.append((t, n, p[0], p[1]))
2753 break
2759 break
2754 n = p[0]
2760 n = p[0]
2755 return b
2761 return b
2756
2762
2757 def between(self, pairs):
2763 def between(self, pairs):
2758 r = []
2764 r = []
2759
2765
2760 for top, bottom in pairs:
2766 for top, bottom in pairs:
2761 n, l, i = top, [], 0
2767 n, l, i = top, [], 0
2762 f = 1
2768 f = 1
2763
2769
2764 while n != bottom and n != nullid:
2770 while n != bottom and n != nullid:
2765 p = self.changelog.parents(n)[0]
2771 p = self.changelog.parents(n)[0]
2766 if i == f:
2772 if i == f:
2767 l.append(n)
2773 l.append(n)
2768 f = f * 2
2774 f = f * 2
2769 n = p
2775 n = p
2770 i += 1
2776 i += 1
2771
2777
2772 r.append(l)
2778 r.append(l)
2773
2779
2774 return r
2780 return r
2775
2781
2776 def checkpush(self, pushop):
2782 def checkpush(self, pushop):
2777 """Extensions can override this function if additional checks have
2783 """Extensions can override this function if additional checks have
2778 to be performed before pushing, or call it if they override push
2784 to be performed before pushing, or call it if they override push
2779 command.
2785 command.
2780 """
2786 """
2781
2787
2782 @unfilteredpropertycache
2788 @unfilteredpropertycache
2783 def prepushoutgoinghooks(self):
2789 def prepushoutgoinghooks(self):
2784 """Return util.hooks consists of a pushop with repo, remote, outgoing
2790 """Return util.hooks consists of a pushop with repo, remote, outgoing
2785 methods, which are called before pushing changesets.
2791 methods, which are called before pushing changesets.
2786 """
2792 """
2787 return util.hooks()
2793 return util.hooks()
2788
2794
    def pushkey(self, namespace, key, old, new):
        """Update a pushkey namespace entry, running pre/post hooks.

        Returns the pushkey backend's result, or False when a prepushkey
        hook aborts the operation.
        """
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                # inherit hook arguments from the active transaction
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            # a hook may veto the change by raising HookAbort
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            # post-change notification; deferred until the lock is released
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret
2813
2819
2814 def listkeys(self, namespace):
2820 def listkeys(self, namespace):
2815 self.hook('prelistkeys', throw=True, namespace=namespace)
2821 self.hook('prelistkeys', throw=True, namespace=namespace)
2816 self.ui.debug('listing keys for "%s"\n' % namespace)
2822 self.ui.debug('listing keys for "%s"\n' % namespace)
2817 values = pushkey.list(self, namespace)
2823 values = pushkey.list(self, namespace)
2818 self.hook('listkeys', namespace=namespace, values=values)
2824 self.hook('listkeys', namespace=namespace, values=values)
2819 return values
2825 return values
2820
2826
2821 def debugwireargs(self, one, two, three=None, four=None, five=None):
2827 def debugwireargs(self, one, two, three=None, four=None, five=None):
2822 '''used to test argument passing over the wire'''
2828 '''used to test argument passing over the wire'''
2823 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2829 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2824 pycompat.bytestr(four),
2830 pycompat.bytestr(four),
2825 pycompat.bytestr(five))
2831 pycompat.bytestr(five))
2826
2832
2827 def savecommitmessage(self, text):
2833 def savecommitmessage(self, text):
2828 fp = self.vfs('last-message.txt', 'wb')
2834 fp = self.vfs('last-message.txt', 'wb')
2829 try:
2835 try:
2830 fp.write(text)
2836 fp.write(text)
2831 finally:
2837 finally:
2832 fp.close()
2838 fp.close()
2833 return self.pathto(fp.name[len(self.root) + 1:])
2839 return self.pathto(fp.name[len(self.root) + 1:])
2834
2840
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback renaming each (vfs, src, dest) entry of ``files``.

    ``dest`` is unlinked first because vfs.rename is a no-op when src and
    dest refer to the same file, which would leave both on disk. A missing
    ``src`` (journal file not yet created) is silently ignored.
    """
    pending = [tuple(spec) for spec in files]
    def dorenames():
        for vfs, src, dest in pending:
            # delete dest up front so the rename can never be a
            # same-file no-op leaving both names behind
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return dorenames
2849
2855
def undoname(fn):
    """Map a journal file path to the matching undo file path.

    The basename must start with 'journal'; only that first occurrence is
    rewritten to 'undo'.
    """
    directory, basename = os.path.split(fn)
    assert basename.startswith('journal')
    return os.path.join(directory, basename.replace('journal', 'undo', 1))
2854
2860
def instance(ui, path, create, intents=None, createopts=None):
    """Open (and, when ``create`` is set, first create) the local
    repository at ``path``."""
    repopath = util.urllocalpath(path)
    if create:
        createrepository(ui, repopath, createopts=createopts)
    return makelocalrepository(ui, repopath, intents=intents)
2861
2867
def islocal(path):
    """Peer-API predicate: repositories handled here are always local."""
    return True
2864
2870
def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated. The input mapping is copied, not
    mutated.
    """
    opts = {} if not createopts else dict(createopts)

    if 'backend' not in opts:
        # experimental config: storage.new-repo-backend
        opts['backend'] = ui.config('storage', 'new-repo-backend')

    return opts
2878
2884
def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    if 'backend' not in createopts:
        raise error.ProgrammingError('backend key not present in createopts; '
                                     'was defaultcreateopts() called?')

    # only the revlogv1 backend is supported here; extensions wrapping this
    # function handle other backends before falling through
    if createopts['backend'] != 'revlogv1':
        raise error.Abort(_('unable to determine repository requirements for '
                            'storage backend: %s') % createopts['backend'])

    requirements = {'revlogv1'}
    # fncache implies store, and dotencode implies fncache, hence nesting
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get('lfs'):
        requirements.add('lfs')

    return requirements
2948
2954
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'backend',
        'lfs',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
        'shallowfilestore',
    }

    unknown = {}
    for name, value in createopts.items():
        if name not in known:
            unknown[name] = value
    return unknown
2974
2980
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    # compute requirements before touching the filesystem so a bad config
    # aborts without leaving a partial repository behind
    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
3068
3074
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Strategy: swap in a class whose attribute lookups all fail. Only
    # close() stays callable, because some repo constructors call close()
    # on references they hold.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item != r'close':
                raise error.ProgrammingError('repo instances should not be used '
                                             'after unshare')
            return object.__getattribute__(self, item)

        def close(self):
            pass

    # A repoview intercepts __setattr__, so assign the class at the
    # lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
@@ -1,2670 +1,2674 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import collections
16 import collections
17 import contextlib
17 import contextlib
18 import errno
18 import errno
19 import os
19 import os
20 import struct
20 import struct
21 import zlib
21 import zlib
22
22
23 # import stuff from node for others to import from revlog
23 # import stuff from node for others to import from revlog
24 from .node import (
24 from .node import (
25 bin,
25 bin,
26 hex,
26 hex,
27 nullhex,
27 nullhex,
28 nullid,
28 nullid,
29 nullrev,
29 nullrev,
30 short,
30 short,
31 wdirfilenodeids,
31 wdirfilenodeids,
32 wdirhex,
32 wdirhex,
33 wdirid,
33 wdirid,
34 wdirrev,
34 wdirrev,
35 )
35 )
36 from .i18n import _
36 from .i18n import _
37 from .revlogutils.constants import (
37 from .revlogutils.constants import (
38 FLAG_GENERALDELTA,
38 FLAG_GENERALDELTA,
39 FLAG_INLINE_DATA,
39 FLAG_INLINE_DATA,
40 REVIDX_DEFAULT_FLAGS,
40 REVIDX_DEFAULT_FLAGS,
41 REVIDX_ELLIPSIS,
41 REVIDX_ELLIPSIS,
42 REVIDX_EXTSTORED,
42 REVIDX_EXTSTORED,
43 REVIDX_FLAGS_ORDER,
43 REVIDX_FLAGS_ORDER,
44 REVIDX_ISCENSORED,
44 REVIDX_ISCENSORED,
45 REVIDX_KNOWN_FLAGS,
45 REVIDX_KNOWN_FLAGS,
46 REVIDX_RAWTEXT_CHANGING_FLAGS,
46 REVIDX_RAWTEXT_CHANGING_FLAGS,
47 REVLOGV0,
47 REVLOGV0,
48 REVLOGV1,
48 REVLOGV1,
49 REVLOGV1_FLAGS,
49 REVLOGV1_FLAGS,
50 REVLOGV2,
50 REVLOGV2,
51 REVLOGV2_FLAGS,
51 REVLOGV2_FLAGS,
52 REVLOG_DEFAULT_FLAGS,
52 REVLOG_DEFAULT_FLAGS,
53 REVLOG_DEFAULT_FORMAT,
53 REVLOG_DEFAULT_FORMAT,
54 REVLOG_DEFAULT_VERSION,
54 REVLOG_DEFAULT_VERSION,
55 )
55 )
56 from .thirdparty import (
56 from .thirdparty import (
57 attr,
57 attr,
58 )
58 )
59 from . import (
59 from . import (
60 ancestor,
60 ancestor,
61 dagop,
61 dagop,
62 error,
62 error,
63 mdiff,
63 mdiff,
64 policy,
64 policy,
65 pycompat,
65 pycompat,
66 repository,
66 repository,
67 templatefilters,
67 templatefilters,
68 util,
68 util,
69 )
69 )
70 from .revlogutils import (
70 from .revlogutils import (
71 deltas as deltautil,
71 deltas as deltautil,
72 )
72 )
73 from .utils import (
73 from .utils import (
74 interfaceutil,
74 interfaceutil,
75 storageutil,
75 storageutil,
76 stringutil,
76 stringutil,
77 )
77 )
78
78
79 # blanked usage of all the name to prevent pyflakes constraints
79 # blanked usage of all the name to prevent pyflakes constraints
80 # We need these name available in the module for extensions.
80 # We need these name available in the module for extensions.
81 REVLOGV0
81 REVLOGV0
82 REVLOGV1
82 REVLOGV1
83 REVLOGV2
83 REVLOGV2
84 FLAG_INLINE_DATA
84 FLAG_INLINE_DATA
85 FLAG_GENERALDELTA
85 FLAG_GENERALDELTA
86 REVLOG_DEFAULT_FLAGS
86 REVLOG_DEFAULT_FLAGS
87 REVLOG_DEFAULT_FORMAT
87 REVLOG_DEFAULT_FORMAT
88 REVLOG_DEFAULT_VERSION
88 REVLOG_DEFAULT_VERSION
89 REVLOGV1_FLAGS
89 REVLOGV1_FLAGS
90 REVLOGV2_FLAGS
90 REVLOGV2_FLAGS
91 REVIDX_ISCENSORED
91 REVIDX_ISCENSORED
92 REVIDX_ELLIPSIS
92 REVIDX_ELLIPSIS
93 REVIDX_EXTSTORED
93 REVIDX_EXTSTORED
94 REVIDX_DEFAULT_FLAGS
94 REVIDX_DEFAULT_FLAGS
95 REVIDX_FLAGS_ORDER
95 REVIDX_FLAGS_ORDER
96 REVIDX_KNOWN_FLAGS
96 REVIDX_KNOWN_FLAGS
97 REVIDX_RAWTEXT_CHANGING_FLAGS
97 REVIDX_RAWTEXT_CHANGING_FLAGS
98
98
99 parsers = policy.importmod(r'parsers')
99 parsers = policy.importmod(r'parsers')
100 try:
100 try:
101 from . import rustext
101 from . import rustext
102 rustext.__name__ # force actual import (see hgdemandimport)
102 rustext.__name__ # force actual import (see hgdemandimport)
103 except ImportError:
103 except ImportError:
104 rustext = None
104 rustext = None
105
105
106 # Aliased for performance.
106 # Aliased for performance.
107 _zlibdecompress = zlib.decompress
107 _zlibdecompress = zlib.decompress
108
108
109 # max size of revlog with inline data
109 # max size of revlog with inline data
110 _maxinline = 131072
110 _maxinline = 131072
111 _chunksize = 1048576
111 _chunksize = 1048576
112
112
113 # Store flag processors (cf. 'addflagprocessor()' to register)
113 # Store flag processors (cf. 'addflagprocessor()' to register)
114 _flagprocessors = {
114 _flagprocessors = {
115 REVIDX_ISCENSORED: None,
115 REVIDX_ISCENSORED: None,
116 }
116 }
117
117
118 # Flag processors for REVIDX_ELLIPSIS.
118 # Flag processors for REVIDX_ELLIPSIS.
119 def ellipsisreadprocessor(rl, text):
119 def ellipsisreadprocessor(rl, text):
120 return text, False
120 return text, False
121
121
122 def ellipsiswriteprocessor(rl, text):
122 def ellipsiswriteprocessor(rl, text):
123 return text, False
123 return text, False
124
124
125 def ellipsisrawprocessor(rl, text):
125 def ellipsisrawprocessor(rl, text):
126 return False
126 return False
127
127
128 ellipsisprocessor = (
128 ellipsisprocessor = (
129 ellipsisreadprocessor,
129 ellipsisreadprocessor,
130 ellipsiswriteprocessor,
130 ellipsiswriteprocessor,
131 ellipsisrawprocessor,
131 ellipsisrawprocessor,
132 )
132 )
133
133
134 def addflagprocessor(flag, processor):
134 def addflagprocessor(flag, processor):
135 """Register a flag processor on a revision data flag.
135 """Register a flag processor on a revision data flag.
136
136
137 Invariant:
137 Invariant:
138 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
138 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
139 and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
139 and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
140 - Only one flag processor can be registered on a specific flag.
140 - Only one flag processor can be registered on a specific flag.
141 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
141 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
142 following signatures:
142 following signatures:
143 - (read) f(self, rawtext) -> text, bool
143 - (read) f(self, rawtext) -> text, bool
144 - (write) f(self, text) -> rawtext, bool
144 - (write) f(self, text) -> rawtext, bool
145 - (raw) f(self, rawtext) -> bool
145 - (raw) f(self, rawtext) -> bool
146 "text" is presented to the user. "rawtext" is stored in revlog data, not
146 "text" is presented to the user. "rawtext" is stored in revlog data, not
147 directly visible to the user.
147 directly visible to the user.
148 The boolean returned by these transforms is used to determine whether
148 The boolean returned by these transforms is used to determine whether
149 the returned text can be used for hash integrity checking. For example,
149 the returned text can be used for hash integrity checking. For example,
150 if "write" returns False, then "text" is used to generate hash. If
150 if "write" returns False, then "text" is used to generate hash. If
151 "write" returns True, that basically means "rawtext" returned by "write"
151 "write" returns True, that basically means "rawtext" returned by "write"
152 should be used to generate hash. Usually, "write" and "read" return
152 should be used to generate hash. Usually, "write" and "read" return
153 different booleans. And "raw" returns a same boolean as "write".
153 different booleans. And "raw" returns a same boolean as "write".
154
154
155 Note: The 'raw' transform is used for changegroup generation and in some
155 Note: The 'raw' transform is used for changegroup generation and in some
156 debug commands. In this case the transform only indicates whether the
156 debug commands. In this case the transform only indicates whether the
157 contents can be used for hash integrity checks.
157 contents can be used for hash integrity checks.
158 """
158 """
159 _insertflagprocessor(flag, processor, _flagprocessors)
159 _insertflagprocessor(flag, processor, _flagprocessors)
160
160
161 def _insertflagprocessor(flag, processor, flagprocessors):
161 def _insertflagprocessor(flag, processor, flagprocessors):
162 if not flag & REVIDX_KNOWN_FLAGS:
162 if not flag & REVIDX_KNOWN_FLAGS:
163 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
163 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
164 raise error.ProgrammingError(msg)
164 raise error.ProgrammingError(msg)
165 if flag not in REVIDX_FLAGS_ORDER:
165 if flag not in REVIDX_FLAGS_ORDER:
166 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
166 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
167 raise error.ProgrammingError(msg)
167 raise error.ProgrammingError(msg)
168 if flag in flagprocessors:
168 if flag in flagprocessors:
169 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
169 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
170 raise error.Abort(msg)
170 raise error.Abort(msg)
171 flagprocessors[flag] = processor
171 flagprocessors[flag] = processor
172
172
173 def getoffset(q):
173 def getoffset(q):
174 return int(q >> 16)
174 return int(q >> 16)
175
175
176 def gettype(q):
176 def gettype(q):
177 return int(q & 0xFFFF)
177 return int(q & 0xFFFF)
178
178
179 def offset_type(offset, type):
179 def offset_type(offset, type):
180 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
180 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
181 raise ValueError('unknown revlog index flags')
181 raise ValueError('unknown revlog index flags')
182 return int(int(offset) << 16 | type)
182 return int(int(offset) << 16 | type)
183
183
184 @attr.s(slots=True, frozen=True)
184 @attr.s(slots=True, frozen=True)
185 class _revisioninfo(object):
185 class _revisioninfo(object):
186 """Information about a revision that allows building its fulltext
186 """Information about a revision that allows building its fulltext
187 node: expected hash of the revision
187 node: expected hash of the revision
188 p1, p2: parent revs of the revision
188 p1, p2: parent revs of the revision
189 btext: built text cache consisting of a one-element list
189 btext: built text cache consisting of a one-element list
190 cachedelta: (baserev, uncompressed_delta) or None
190 cachedelta: (baserev, uncompressed_delta) or None
191 flags: flags associated to the revision storage
191 flags: flags associated to the revision storage
192
192
193 One of btext[0] or cachedelta must be set.
193 One of btext[0] or cachedelta must be set.
194 """
194 """
195 node = attr.ib()
195 node = attr.ib()
196 p1 = attr.ib()
196 p1 = attr.ib()
197 p2 = attr.ib()
197 p2 = attr.ib()
198 btext = attr.ib()
198 btext = attr.ib()
199 textlen = attr.ib()
199 textlen = attr.ib()
200 cachedelta = attr.ib()
200 cachedelta = attr.ib()
201 flags = attr.ib()
201 flags = attr.ib()
202
202
203 @interfaceutil.implementer(repository.irevisiondelta)
203 @interfaceutil.implementer(repository.irevisiondelta)
204 @attr.s(slots=True)
204 @attr.s(slots=True)
205 class revlogrevisiondelta(object):
205 class revlogrevisiondelta(object):
206 node = attr.ib()
206 node = attr.ib()
207 p1node = attr.ib()
207 p1node = attr.ib()
208 p2node = attr.ib()
208 p2node = attr.ib()
209 basenode = attr.ib()
209 basenode = attr.ib()
210 flags = attr.ib()
210 flags = attr.ib()
211 baserevisionsize = attr.ib()
211 baserevisionsize = attr.ib()
212 revision = attr.ib()
212 revision = attr.ib()
213 delta = attr.ib()
213 delta = attr.ib()
214 linknode = attr.ib(default=None)
214 linknode = attr.ib(default=None)
215
215
216 @interfaceutil.implementer(repository.iverifyproblem)
216 @interfaceutil.implementer(repository.iverifyproblem)
217 @attr.s(frozen=True)
217 @attr.s(frozen=True)
218 class revlogproblem(object):
218 class revlogproblem(object):
219 warning = attr.ib(default=None)
219 warning = attr.ib(default=None)
220 error = attr.ib(default=None)
220 error = attr.ib(default=None)
221 node = attr.ib(default=None)
221 node = attr.ib(default=None)
222
222
223 # index v0:
223 # index v0:
224 # 4 bytes: offset
224 # 4 bytes: offset
225 # 4 bytes: compressed length
225 # 4 bytes: compressed length
226 # 4 bytes: base rev
226 # 4 bytes: base rev
227 # 4 bytes: link rev
227 # 4 bytes: link rev
228 # 20 bytes: parent 1 nodeid
228 # 20 bytes: parent 1 nodeid
229 # 20 bytes: parent 2 nodeid
229 # 20 bytes: parent 2 nodeid
230 # 20 bytes: nodeid
230 # 20 bytes: nodeid
231 indexformatv0 = struct.Struct(">4l20s20s20s")
231 indexformatv0 = struct.Struct(">4l20s20s20s")
232 indexformatv0_pack = indexformatv0.pack
232 indexformatv0_pack = indexformatv0.pack
233 indexformatv0_unpack = indexformatv0.unpack
233 indexformatv0_unpack = indexformatv0.unpack
234
234
235 class revlogoldindex(list):
235 class revlogoldindex(list):
236 def __getitem__(self, i):
236 def __getitem__(self, i):
237 if i == -1:
237 if i == -1:
238 return (0, 0, 0, -1, -1, -1, -1, nullid)
238 return (0, 0, 0, -1, -1, -1, -1, nullid)
239 return list.__getitem__(self, i)
239 return list.__getitem__(self, i)
240
240
241 class revlogoldio(object):
241 class revlogoldio(object):
242 def __init__(self):
242 def __init__(self):
243 self.size = indexformatv0.size
243 self.size = indexformatv0.size
244
244
245 def parseindex(self, data, inline):
245 def parseindex(self, data, inline):
246 s = self.size
246 s = self.size
247 index = []
247 index = []
248 nodemap = {nullid: nullrev}
248 nodemap = {nullid: nullrev}
249 n = off = 0
249 n = off = 0
250 l = len(data)
250 l = len(data)
251 while off + s <= l:
251 while off + s <= l:
252 cur = data[off:off + s]
252 cur = data[off:off + s]
253 off += s
253 off += s
254 e = indexformatv0_unpack(cur)
254 e = indexformatv0_unpack(cur)
255 # transform to revlogv1 format
255 # transform to revlogv1 format
256 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
256 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
257 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
257 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
258 index.append(e2)
258 index.append(e2)
259 nodemap[e[6]] = n
259 nodemap[e[6]] = n
260 n += 1
260 n += 1
261
261
262 return revlogoldindex(index), nodemap, None
262 return revlogoldindex(index), nodemap, None
263
263
264 def packentry(self, entry, node, version, rev):
264 def packentry(self, entry, node, version, rev):
265 if gettype(entry[0]):
265 if gettype(entry[0]):
266 raise error.RevlogError(_('index entry flags need revlog '
266 raise error.RevlogError(_('index entry flags need revlog '
267 'version 1'))
267 'version 1'))
268 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
268 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
269 node(entry[5]), node(entry[6]), entry[7])
269 node(entry[5]), node(entry[6]), entry[7])
270 return indexformatv0_pack(*e2)
270 return indexformatv0_pack(*e2)
271
271
272 # index ng:
272 # index ng:
273 # 6 bytes: offset
273 # 6 bytes: offset
274 # 2 bytes: flags
274 # 2 bytes: flags
275 # 4 bytes: compressed length
275 # 4 bytes: compressed length
276 # 4 bytes: uncompressed length
276 # 4 bytes: uncompressed length
277 # 4 bytes: base rev
277 # 4 bytes: base rev
278 # 4 bytes: link rev
278 # 4 bytes: link rev
279 # 4 bytes: parent 1 rev
279 # 4 bytes: parent 1 rev
280 # 4 bytes: parent 2 rev
280 # 4 bytes: parent 2 rev
281 # 32 bytes: nodeid
281 # 32 bytes: nodeid
282 indexformatng = struct.Struct(">Qiiiiii20s12x")
282 indexformatng = struct.Struct(">Qiiiiii20s12x")
283 indexformatng_pack = indexformatng.pack
283 indexformatng_pack = indexformatng.pack
284 versionformat = struct.Struct(">I")
284 versionformat = struct.Struct(">I")
285 versionformat_pack = versionformat.pack
285 versionformat_pack = versionformat.pack
286 versionformat_unpack = versionformat.unpack
286 versionformat_unpack = versionformat.unpack
287
287
288 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
288 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
289 # signed integer)
289 # signed integer)
290 _maxentrysize = 0x7fffffff
290 _maxentrysize = 0x7fffffff
291
291
292 class revlogio(object):
292 class revlogio(object):
293 def __init__(self):
293 def __init__(self):
294 self.size = indexformatng.size
294 self.size = indexformatng.size
295
295
296 def parseindex(self, data, inline):
296 def parseindex(self, data, inline):
297 # call the C implementation to parse the index data
297 # call the C implementation to parse the index data
298 index, cache = parsers.parse_index2(data, inline)
298 index, cache = parsers.parse_index2(data, inline)
299 return index, getattr(index, 'nodemap', None), cache
299 return index, getattr(index, 'nodemap', None), cache
300
300
301 def packentry(self, entry, node, version, rev):
301 def packentry(self, entry, node, version, rev):
302 p = indexformatng_pack(*entry)
302 p = indexformatng_pack(*entry)
303 if rev == 0:
303 if rev == 0:
304 p = versionformat_pack(version) + p[4:]
304 p = versionformat_pack(version) + p[4:]
305 return p
305 return p
306
306
307 class revlog(object):
307 class revlog(object):
308 """
308 """
309 the underlying revision storage object
309 the underlying revision storage object
310
310
311 A revlog consists of two parts, an index and the revision data.
311 A revlog consists of two parts, an index and the revision data.
312
312
313 The index is a file with a fixed record size containing
313 The index is a file with a fixed record size containing
314 information on each revision, including its nodeid (hash), the
314 information on each revision, including its nodeid (hash), the
315 nodeids of its parents, the position and offset of its data within
315 nodeids of its parents, the position and offset of its data within
316 the data file, and the revision it's based on. Finally, each entry
316 the data file, and the revision it's based on. Finally, each entry
317 contains a linkrev entry that can serve as a pointer to external
317 contains a linkrev entry that can serve as a pointer to external
318 data.
318 data.
319
319
320 The revision data itself is a linear collection of data chunks.
320 The revision data itself is a linear collection of data chunks.
321 Each chunk represents a revision and is usually represented as a
321 Each chunk represents a revision and is usually represented as a
322 delta against the previous chunk. To bound lookup time, runs of
322 delta against the previous chunk. To bound lookup time, runs of
323 deltas are limited to about 2 times the length of the original
323 deltas are limited to about 2 times the length of the original
324 version data. This makes retrieval of a version proportional to
324 version data. This makes retrieval of a version proportional to
325 its size, or O(1) relative to the number of revisions.
325 its size, or O(1) relative to the number of revisions.
326
326
327 Both pieces of the revlog are written to in an append-only
327 Both pieces of the revlog are written to in an append-only
328 fashion, which means we never need to rewrite a file to insert or
328 fashion, which means we never need to rewrite a file to insert or
329 remove data, and can use some simple techniques to avoid the need
329 remove data, and can use some simple techniques to avoid the need
330 for locking while reading.
330 for locking while reading.
331
331
332 If checkambig, indexfile is opened with checkambig=True at
332 If checkambig, indexfile is opened with checkambig=True at
333 writing, to avoid file stat ambiguity.
333 writing, to avoid file stat ambiguity.
334
334
335 If mmaplargeindex is True, and an mmapindexthreshold is set, the
335 If mmaplargeindex is True, and an mmapindexthreshold is set, the
336 index will be mmapped rather than read if it is larger than the
336 index will be mmapped rather than read if it is larger than the
337 configured threshold.
337 configured threshold.
338
338
339 If censorable is True, the revlog can have censored revisions.
339 If censorable is True, the revlog can have censored revisions.
340 """
340 """
341 def __init__(self, opener, indexfile, datafile=None, checkambig=False,
341 def __init__(self, opener, indexfile, datafile=None, checkambig=False,
342 mmaplargeindex=False, censorable=False):
342 mmaplargeindex=False, censorable=False):
343 """
343 """
344 create a revlog object
344 create a revlog object
345
345
346 opener is a function that abstracts the file opening operation
346 opener is a function that abstracts the file opening operation
347 and can be used to implement COW semantics or the like.
347 and can be used to implement COW semantics or the like.
348 """
348 """
349 self.indexfile = indexfile
349 self.indexfile = indexfile
350 self.datafile = datafile or (indexfile[:-2] + ".d")
350 self.datafile = datafile or (indexfile[:-2] + ".d")
351 self.opener = opener
351 self.opener = opener
352 # When True, indexfile is opened with checkambig=True at writing, to
352 # When True, indexfile is opened with checkambig=True at writing, to
353 # avoid file stat ambiguity.
353 # avoid file stat ambiguity.
354 self._checkambig = checkambig
354 self._checkambig = checkambig
355 self._mmaplargeindex = mmaplargeindex
355 self._mmaplargeindex = mmaplargeindex
356 self._censorable = censorable
356 self._censorable = censorable
357 # 3-tuple of (node, rev, text) for a raw revision.
357 # 3-tuple of (node, rev, text) for a raw revision.
358 self._revisioncache = None
358 self._revisioncache = None
359 # Maps rev to chain base rev.
359 # Maps rev to chain base rev.
360 self._chainbasecache = util.lrucachedict(100)
360 self._chainbasecache = util.lrucachedict(100)
361 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
361 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
362 self._chunkcache = (0, '')
362 self._chunkcache = (0, '')
363 # How much data to read and cache into the raw revlog data cache.
363 # How much data to read and cache into the raw revlog data cache.
364 self._chunkcachesize = 65536
364 self._chunkcachesize = 65536
365 self._maxchainlen = None
365 self._maxchainlen = None
366 self._deltabothparents = True
366 self._deltabothparents = True
367 self.index = []
367 self.index = []
368 # Mapping of partial identifiers to full nodes.
368 # Mapping of partial identifiers to full nodes.
369 self._pcache = {}
369 self._pcache = {}
370 # Mapping of revision integer to full node.
370 # Mapping of revision integer to full node.
371 self._nodecache = {nullid: nullrev}
371 self._nodecache = {nullid: nullrev}
372 self._nodepos = None
372 self._nodepos = None
373 self._compengine = 'zlib'
373 self._compengine = 'zlib'
374 self._compengineopts = {}
374 self._maxdeltachainspan = -1
375 self._maxdeltachainspan = -1
375 self._withsparseread = False
376 self._withsparseread = False
376 self._sparserevlog = False
377 self._sparserevlog = False
377 self._srdensitythreshold = 0.50
378 self._srdensitythreshold = 0.50
378 self._srmingapsize = 262144
379 self._srmingapsize = 262144
379
380
380 # Make copy of flag processors so each revlog instance can support
381 # Make copy of flag processors so each revlog instance can support
381 # custom flags.
382 # custom flags.
382 self._flagprocessors = dict(_flagprocessors)
383 self._flagprocessors = dict(_flagprocessors)
383
384
384 # 2-tuple of file handles being used for active writing.
385 # 2-tuple of file handles being used for active writing.
385 self._writinghandles = None
386 self._writinghandles = None
386
387
387 self._loadindex()
388 self._loadindex()
388
389
389 def _loadindex(self):
390 def _loadindex(self):
390 mmapindexthreshold = None
391 mmapindexthreshold = None
391 opts = getattr(self.opener, 'options', {}) or {}
392 opts = getattr(self.opener, 'options', {}) or {}
392
393
393 if 'revlogv2' in opts:
394 if 'revlogv2' in opts:
394 newversionflags = REVLOGV2 | FLAG_INLINE_DATA
395 newversionflags = REVLOGV2 | FLAG_INLINE_DATA
395 elif 'revlogv1' in opts:
396 elif 'revlogv1' in opts:
396 newversionflags = REVLOGV1 | FLAG_INLINE_DATA
397 newversionflags = REVLOGV1 | FLAG_INLINE_DATA
397 if 'generaldelta' in opts:
398 if 'generaldelta' in opts:
398 newversionflags |= FLAG_GENERALDELTA
399 newversionflags |= FLAG_GENERALDELTA
399 elif getattr(self.opener, 'options', None) is not None:
400 elif getattr(self.opener, 'options', None) is not None:
400 # If options provided but no 'revlog*' found, the repository
401 # If options provided but no 'revlog*' found, the repository
401 # would have no 'requires' file in it, which means we have to
402 # would have no 'requires' file in it, which means we have to
402 # stick to the old format.
403 # stick to the old format.
403 newversionflags = REVLOGV0
404 newversionflags = REVLOGV0
404 else:
405 else:
405 newversionflags = REVLOG_DEFAULT_VERSION
406 newversionflags = REVLOG_DEFAULT_VERSION
406
407
407 if 'chunkcachesize' in opts:
408 if 'chunkcachesize' in opts:
408 self._chunkcachesize = opts['chunkcachesize']
409 self._chunkcachesize = opts['chunkcachesize']
409 if 'maxchainlen' in opts:
410 if 'maxchainlen' in opts:
410 self._maxchainlen = opts['maxchainlen']
411 self._maxchainlen = opts['maxchainlen']
411 if 'deltabothparents' in opts:
412 if 'deltabothparents' in opts:
412 self._deltabothparents = opts['deltabothparents']
413 self._deltabothparents = opts['deltabothparents']
413 self._lazydelta = bool(opts.get('lazydelta', True))
414 self._lazydelta = bool(opts.get('lazydelta', True))
414 self._lazydeltabase = False
415 self._lazydeltabase = False
415 if self._lazydelta:
416 if self._lazydelta:
416 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
417 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
417 if 'compengine' in opts:
418 if 'compengine' in opts:
418 self._compengine = opts['compengine']
419 self._compengine = opts['compengine']
420 if 'zlib.level' in opts:
421 self._compengineopts['zlib.level'] = opts['zlib.level']
419 if 'maxdeltachainspan' in opts:
422 if 'maxdeltachainspan' in opts:
420 self._maxdeltachainspan = opts['maxdeltachainspan']
423 self._maxdeltachainspan = opts['maxdeltachainspan']
421 if self._mmaplargeindex and 'mmapindexthreshold' in opts:
424 if self._mmaplargeindex and 'mmapindexthreshold' in opts:
422 mmapindexthreshold = opts['mmapindexthreshold']
425 mmapindexthreshold = opts['mmapindexthreshold']
423 self._sparserevlog = bool(opts.get('sparse-revlog', False))
426 self._sparserevlog = bool(opts.get('sparse-revlog', False))
424 withsparseread = bool(opts.get('with-sparse-read', False))
427 withsparseread = bool(opts.get('with-sparse-read', False))
425 # sparse-revlog forces sparse-read
428 # sparse-revlog forces sparse-read
426 self._withsparseread = self._sparserevlog or withsparseread
429 self._withsparseread = self._sparserevlog or withsparseread
427 if 'sparse-read-density-threshold' in opts:
430 if 'sparse-read-density-threshold' in opts:
428 self._srdensitythreshold = opts['sparse-read-density-threshold']
431 self._srdensitythreshold = opts['sparse-read-density-threshold']
429 if 'sparse-read-min-gap-size' in opts:
432 if 'sparse-read-min-gap-size' in opts:
430 self._srmingapsize = opts['sparse-read-min-gap-size']
433 self._srmingapsize = opts['sparse-read-min-gap-size']
431 if opts.get('enableellipsis'):
434 if opts.get('enableellipsis'):
432 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
435 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
433
436
434 # revlog v0 doesn't have flag processors
437 # revlog v0 doesn't have flag processors
435 for flag, processor in opts.get(b'flagprocessors', {}).iteritems():
438 for flag, processor in opts.get(b'flagprocessors', {}).iteritems():
436 _insertflagprocessor(flag, processor, self._flagprocessors)
439 _insertflagprocessor(flag, processor, self._flagprocessors)
437
440
438 if self._chunkcachesize <= 0:
441 if self._chunkcachesize <= 0:
439 raise error.RevlogError(_('revlog chunk cache size %r is not '
442 raise error.RevlogError(_('revlog chunk cache size %r is not '
440 'greater than 0') % self._chunkcachesize)
443 'greater than 0') % self._chunkcachesize)
441 elif self._chunkcachesize & (self._chunkcachesize - 1):
444 elif self._chunkcachesize & (self._chunkcachesize - 1):
442 raise error.RevlogError(_('revlog chunk cache size %r is not a '
445 raise error.RevlogError(_('revlog chunk cache size %r is not a '
443 'power of 2') % self._chunkcachesize)
446 'power of 2') % self._chunkcachesize)
444
447
445 indexdata = ''
448 indexdata = ''
446 self._initempty = True
449 self._initempty = True
447 try:
450 try:
448 with self._indexfp() as f:
451 with self._indexfp() as f:
449 if (mmapindexthreshold is not None and
452 if (mmapindexthreshold is not None and
450 self.opener.fstat(f).st_size >= mmapindexthreshold):
453 self.opener.fstat(f).st_size >= mmapindexthreshold):
451 # TODO: should .close() to release resources without
454 # TODO: should .close() to release resources without
452 # relying on Python GC
455 # relying on Python GC
453 indexdata = util.buffer(util.mmapread(f))
456 indexdata = util.buffer(util.mmapread(f))
454 else:
457 else:
455 indexdata = f.read()
458 indexdata = f.read()
456 if len(indexdata) > 0:
459 if len(indexdata) > 0:
457 versionflags = versionformat_unpack(indexdata[:4])[0]
460 versionflags = versionformat_unpack(indexdata[:4])[0]
458 self._initempty = False
461 self._initempty = False
459 else:
462 else:
460 versionflags = newversionflags
463 versionflags = newversionflags
461 except IOError as inst:
464 except IOError as inst:
462 if inst.errno != errno.ENOENT:
465 if inst.errno != errno.ENOENT:
463 raise
466 raise
464
467
465 versionflags = newversionflags
468 versionflags = newversionflags
466
469
467 self.version = versionflags
470 self.version = versionflags
468
471
469 flags = versionflags & ~0xFFFF
472 flags = versionflags & ~0xFFFF
470 fmt = versionflags & 0xFFFF
473 fmt = versionflags & 0xFFFF
471
474
472 if fmt == REVLOGV0:
475 if fmt == REVLOGV0:
473 if flags:
476 if flags:
474 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
477 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
475 'revlog %s') %
478 'revlog %s') %
476 (flags >> 16, fmt, self.indexfile))
479 (flags >> 16, fmt, self.indexfile))
477
480
478 self._inline = False
481 self._inline = False
479 self._generaldelta = False
482 self._generaldelta = False
480
483
481 elif fmt == REVLOGV1:
484 elif fmt == REVLOGV1:
482 if flags & ~REVLOGV1_FLAGS:
485 if flags & ~REVLOGV1_FLAGS:
483 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
486 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
484 'revlog %s') %
487 'revlog %s') %
485 (flags >> 16, fmt, self.indexfile))
488 (flags >> 16, fmt, self.indexfile))
486
489
487 self._inline = versionflags & FLAG_INLINE_DATA
490 self._inline = versionflags & FLAG_INLINE_DATA
488 self._generaldelta = versionflags & FLAG_GENERALDELTA
491 self._generaldelta = versionflags & FLAG_GENERALDELTA
489
492
490 elif fmt == REVLOGV2:
493 elif fmt == REVLOGV2:
491 if flags & ~REVLOGV2_FLAGS:
494 if flags & ~REVLOGV2_FLAGS:
492 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
495 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
493 'revlog %s') %
496 'revlog %s') %
494 (flags >> 16, fmt, self.indexfile))
497 (flags >> 16, fmt, self.indexfile))
495
498
496 self._inline = versionflags & FLAG_INLINE_DATA
499 self._inline = versionflags & FLAG_INLINE_DATA
497 # generaldelta implied by version 2 revlogs.
500 # generaldelta implied by version 2 revlogs.
498 self._generaldelta = True
501 self._generaldelta = True
499
502
500 else:
503 else:
501 raise error.RevlogError(_('unknown version (%d) in revlog %s') %
504 raise error.RevlogError(_('unknown version (%d) in revlog %s') %
502 (fmt, self.indexfile))
505 (fmt, self.indexfile))
503 # sparse-revlog can't be on without general-delta (issue6056)
506 # sparse-revlog can't be on without general-delta (issue6056)
504 if not self._generaldelta:
507 if not self._generaldelta:
505 self._sparserevlog = False
508 self._sparserevlog = False
506
509
507 self._storedeltachains = True
510 self._storedeltachains = True
508
511
509 self._io = revlogio()
512 self._io = revlogio()
510 if self.version == REVLOGV0:
513 if self.version == REVLOGV0:
511 self._io = revlogoldio()
514 self._io = revlogoldio()
512 try:
515 try:
513 d = self._io.parseindex(indexdata, self._inline)
516 d = self._io.parseindex(indexdata, self._inline)
514 except (ValueError, IndexError):
517 except (ValueError, IndexError):
515 raise error.RevlogError(_("index %s is corrupted") %
518 raise error.RevlogError(_("index %s is corrupted") %
516 self.indexfile)
519 self.indexfile)
517 self.index, nodemap, self._chunkcache = d
520 self.index, nodemap, self._chunkcache = d
518 if nodemap is not None:
521 if nodemap is not None:
519 self.nodemap = self._nodecache = nodemap
522 self.nodemap = self._nodecache = nodemap
520 if not self._chunkcache:
523 if not self._chunkcache:
521 self._chunkclear()
524 self._chunkclear()
522 # revnum -> (chain-length, sum-delta-length)
525 # revnum -> (chain-length, sum-delta-length)
523 self._chaininfocache = {}
526 self._chaininfocache = {}
524 # revlog header -> revlog compressor
527 # revlog header -> revlog compressor
525 self._decompressors = {}
528 self._decompressors = {}
526
529
527 @util.propertycache
530 @util.propertycache
528 def _compressor(self):
531 def _compressor(self):
529 return util.compengines[self._compengine].revlogcompressor()
532 engine = util.compengines[self._compengine]
533 return engine.revlogcompressor(self._compengineopts)
530
534
531 def _indexfp(self, mode='r'):
535 def _indexfp(self, mode='r'):
532 """file object for the revlog's index file"""
536 """file object for the revlog's index file"""
533 args = {r'mode': mode}
537 args = {r'mode': mode}
534 if mode != 'r':
538 if mode != 'r':
535 args[r'checkambig'] = self._checkambig
539 args[r'checkambig'] = self._checkambig
536 if mode == 'w':
540 if mode == 'w':
537 args[r'atomictemp'] = True
541 args[r'atomictemp'] = True
538 return self.opener(self.indexfile, **args)
542 return self.opener(self.indexfile, **args)
539
543
540 def _datafp(self, mode='r'):
544 def _datafp(self, mode='r'):
541 """file object for the revlog's data file"""
545 """file object for the revlog's data file"""
542 return self.opener(self.datafile, mode=mode)
546 return self.opener(self.datafile, mode=mode)
543
547
544 @contextlib.contextmanager
548 @contextlib.contextmanager
545 def _datareadfp(self, existingfp=None):
549 def _datareadfp(self, existingfp=None):
546 """file object suitable to read data"""
550 """file object suitable to read data"""
547 # Use explicit file handle, if given.
551 # Use explicit file handle, if given.
548 if existingfp is not None:
552 if existingfp is not None:
549 yield existingfp
553 yield existingfp
550
554
551 # Use a file handle being actively used for writes, if available.
555 # Use a file handle being actively used for writes, if available.
552 # There is some danger to doing this because reads will seek the
556 # There is some danger to doing this because reads will seek the
553 # file. However, _writeentry() performs a SEEK_END before all writes,
557 # file. However, _writeentry() performs a SEEK_END before all writes,
554 # so we should be safe.
558 # so we should be safe.
555 elif self._writinghandles:
559 elif self._writinghandles:
556 if self._inline:
560 if self._inline:
557 yield self._writinghandles[0]
561 yield self._writinghandles[0]
558 else:
562 else:
559 yield self._writinghandles[1]
563 yield self._writinghandles[1]
560
564
561 # Otherwise open a new file handle.
565 # Otherwise open a new file handle.
562 else:
566 else:
563 if self._inline:
567 if self._inline:
564 func = self._indexfp
568 func = self._indexfp
565 else:
569 else:
566 func = self._datafp
570 func = self._datafp
567 with func() as fp:
571 with func() as fp:
568 yield fp
572 yield fp
569
573
570 def tip(self):
574 def tip(self):
571 return self.node(len(self.index) - 1)
575 return self.node(len(self.index) - 1)
572 def __contains__(self, rev):
576 def __contains__(self, rev):
573 return 0 <= rev < len(self)
577 return 0 <= rev < len(self)
574 def __len__(self):
578 def __len__(self):
575 return len(self.index)
579 return len(self.index)
576 def __iter__(self):
580 def __iter__(self):
577 return iter(pycompat.xrange(len(self)))
581 return iter(pycompat.xrange(len(self)))
578 def revs(self, start=0, stop=None):
582 def revs(self, start=0, stop=None):
579 """iterate over all rev in this revlog (from start to stop)"""
583 """iterate over all rev in this revlog (from start to stop)"""
580 return storageutil.iterrevs(len(self), start=start, stop=stop)
584 return storageutil.iterrevs(len(self), start=start, stop=stop)
581
585
582 @util.propertycache
586 @util.propertycache
583 def nodemap(self):
587 def nodemap(self):
584 if self.index:
588 if self.index:
585 # populate mapping down to the initial node
589 # populate mapping down to the initial node
586 node0 = self.index[0][7] # get around changelog filtering
590 node0 = self.index[0][7] # get around changelog filtering
587 self.rev(node0)
591 self.rev(node0)
588 return self._nodecache
592 return self._nodecache
589
593
590 def hasnode(self, node):
594 def hasnode(self, node):
591 try:
595 try:
592 self.rev(node)
596 self.rev(node)
593 return True
597 return True
594 except KeyError:
598 except KeyError:
595 return False
599 return False
596
600
597 def candelta(self, baserev, rev):
601 def candelta(self, baserev, rev):
598 """whether two revisions (baserev, rev) can be delta-ed or not"""
602 """whether two revisions (baserev, rev) can be delta-ed or not"""
599 # Disable delta if either rev requires a content-changing flag
603 # Disable delta if either rev requires a content-changing flag
600 # processor (ex. LFS). This is because such flag processor can alter
604 # processor (ex. LFS). This is because such flag processor can alter
601 # the rawtext content that the delta will be based on, and two clients
605 # the rawtext content that the delta will be based on, and two clients
602 # could have a same revlog node with different flags (i.e. different
606 # could have a same revlog node with different flags (i.e. different
603 # rawtext contents) and the delta could be incompatible.
607 # rawtext contents) and the delta could be incompatible.
604 if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
608 if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
605 or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
609 or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
606 return False
610 return False
607 return True
611 return True
608
612
609 def clearcaches(self):
613 def clearcaches(self):
610 self._revisioncache = None
614 self._revisioncache = None
611 self._chainbasecache.clear()
615 self._chainbasecache.clear()
612 self._chunkcache = (0, '')
616 self._chunkcache = (0, '')
613 self._pcache = {}
617 self._pcache = {}
614
618
615 try:
619 try:
616 # If we are using the native C version, you are in a fun case
620 # If we are using the native C version, you are in a fun case
617 # where self.index, self.nodemap and self._nodecaches is the same
621 # where self.index, self.nodemap and self._nodecaches is the same
618 # object.
622 # object.
619 self._nodecache.clearcaches()
623 self._nodecache.clearcaches()
620 except AttributeError:
624 except AttributeError:
621 self._nodecache = {nullid: nullrev}
625 self._nodecache = {nullid: nullrev}
622 self._nodepos = None
626 self._nodepos = None
623
627
624 def rev(self, node):
628 def rev(self, node):
625 try:
629 try:
626 return self._nodecache[node]
630 return self._nodecache[node]
627 except TypeError:
631 except TypeError:
628 raise
632 raise
629 except error.RevlogError:
633 except error.RevlogError:
630 # parsers.c radix tree lookup failed
634 # parsers.c radix tree lookup failed
631 if node == wdirid or node in wdirfilenodeids:
635 if node == wdirid or node in wdirfilenodeids:
632 raise error.WdirUnsupported
636 raise error.WdirUnsupported
633 raise error.LookupError(node, self.indexfile, _('no node'))
637 raise error.LookupError(node, self.indexfile, _('no node'))
634 except KeyError:
638 except KeyError:
635 # pure python cache lookup failed
639 # pure python cache lookup failed
636 n = self._nodecache
640 n = self._nodecache
637 i = self.index
641 i = self.index
638 p = self._nodepos
642 p = self._nodepos
639 if p is None:
643 if p is None:
640 p = len(i) - 1
644 p = len(i) - 1
641 else:
645 else:
642 assert p < len(i)
646 assert p < len(i)
643 for r in pycompat.xrange(p, -1, -1):
647 for r in pycompat.xrange(p, -1, -1):
644 v = i[r][7]
648 v = i[r][7]
645 n[v] = r
649 n[v] = r
646 if v == node:
650 if v == node:
647 self._nodepos = r - 1
651 self._nodepos = r - 1
648 return r
652 return r
649 if node == wdirid or node in wdirfilenodeids:
653 if node == wdirid or node in wdirfilenodeids:
650 raise error.WdirUnsupported
654 raise error.WdirUnsupported
651 raise error.LookupError(node, self.indexfile, _('no node'))
655 raise error.LookupError(node, self.indexfile, _('no node'))
652
656
653 # Accessors for index entries.
657 # Accessors for index entries.
654
658
655 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
659 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
656 # are flags.
660 # are flags.
657 def start(self, rev):
661 def start(self, rev):
658 return int(self.index[rev][0] >> 16)
662 return int(self.index[rev][0] >> 16)
659
663
660 def flags(self, rev):
664 def flags(self, rev):
661 return self.index[rev][0] & 0xFFFF
665 return self.index[rev][0] & 0xFFFF
662
666
663 def length(self, rev):
667 def length(self, rev):
664 return self.index[rev][1]
668 return self.index[rev][1]
665
669
666 def rawsize(self, rev):
670 def rawsize(self, rev):
667 """return the length of the uncompressed text for a given revision"""
671 """return the length of the uncompressed text for a given revision"""
668 l = self.index[rev][2]
672 l = self.index[rev][2]
669 if l >= 0:
673 if l >= 0:
670 return l
674 return l
671
675
672 t = self.revision(rev, raw=True)
676 t = self.revision(rev, raw=True)
673 return len(t)
677 return len(t)
674
678
675 def size(self, rev):
679 def size(self, rev):
676 """length of non-raw text (processed by a "read" flag processor)"""
680 """length of non-raw text (processed by a "read" flag processor)"""
677 # fast path: if no "read" flag processor could change the content,
681 # fast path: if no "read" flag processor could change the content,
678 # size is rawsize. note: ELLIPSIS is known to not change the content.
682 # size is rawsize. note: ELLIPSIS is known to not change the content.
679 flags = self.flags(rev)
683 flags = self.flags(rev)
680 if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
684 if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
681 return self.rawsize(rev)
685 return self.rawsize(rev)
682
686
683 return len(self.revision(rev, raw=False))
687 return len(self.revision(rev, raw=False))
684
688
685 def chainbase(self, rev):
689 def chainbase(self, rev):
686 base = self._chainbasecache.get(rev)
690 base = self._chainbasecache.get(rev)
687 if base is not None:
691 if base is not None:
688 return base
692 return base
689
693
690 index = self.index
694 index = self.index
691 iterrev = rev
695 iterrev = rev
692 base = index[iterrev][3]
696 base = index[iterrev][3]
693 while base != iterrev:
697 while base != iterrev:
694 iterrev = base
698 iterrev = base
695 base = index[iterrev][3]
699 base = index[iterrev][3]
696
700
697 self._chainbasecache[rev] = base
701 self._chainbasecache[rev] = base
698 return base
702 return base
699
703
700 def linkrev(self, rev):
704 def linkrev(self, rev):
701 return self.index[rev][4]
705 return self.index[rev][4]
702
706
703 def parentrevs(self, rev):
707 def parentrevs(self, rev):
704 try:
708 try:
705 entry = self.index[rev]
709 entry = self.index[rev]
706 except IndexError:
710 except IndexError:
707 if rev == wdirrev:
711 if rev == wdirrev:
708 raise error.WdirUnsupported
712 raise error.WdirUnsupported
709 raise
713 raise
710
714
711 return entry[5], entry[6]
715 return entry[5], entry[6]
712
716
713 # fast parentrevs(rev) where rev isn't filtered
717 # fast parentrevs(rev) where rev isn't filtered
714 _uncheckedparentrevs = parentrevs
718 _uncheckedparentrevs = parentrevs
715
719
716 def node(self, rev):
720 def node(self, rev):
717 try:
721 try:
718 return self.index[rev][7]
722 return self.index[rev][7]
719 except IndexError:
723 except IndexError:
720 if rev == wdirrev:
724 if rev == wdirrev:
721 raise error.WdirUnsupported
725 raise error.WdirUnsupported
722 raise
726 raise
723
727
724 # Derived from index values.
728 # Derived from index values.
725
729
726 def end(self, rev):
730 def end(self, rev):
727 return self.start(rev) + self.length(rev)
731 return self.start(rev) + self.length(rev)
728
732
729 def parents(self, node):
733 def parents(self, node):
730 i = self.index
734 i = self.index
731 d = i[self.rev(node)]
735 d = i[self.rev(node)]
732 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
736 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
733
737
734 def chainlen(self, rev):
738 def chainlen(self, rev):
735 return self._chaininfo(rev)[0]
739 return self._chaininfo(rev)[0]
736
740
737 def _chaininfo(self, rev):
741 def _chaininfo(self, rev):
738 chaininfocache = self._chaininfocache
742 chaininfocache = self._chaininfocache
739 if rev in chaininfocache:
743 if rev in chaininfocache:
740 return chaininfocache[rev]
744 return chaininfocache[rev]
741 index = self.index
745 index = self.index
742 generaldelta = self._generaldelta
746 generaldelta = self._generaldelta
743 iterrev = rev
747 iterrev = rev
744 e = index[iterrev]
748 e = index[iterrev]
745 clen = 0
749 clen = 0
746 compresseddeltalen = 0
750 compresseddeltalen = 0
747 while iterrev != e[3]:
751 while iterrev != e[3]:
748 clen += 1
752 clen += 1
749 compresseddeltalen += e[1]
753 compresseddeltalen += e[1]
750 if generaldelta:
754 if generaldelta:
751 iterrev = e[3]
755 iterrev = e[3]
752 else:
756 else:
753 iterrev -= 1
757 iterrev -= 1
754 if iterrev in chaininfocache:
758 if iterrev in chaininfocache:
755 t = chaininfocache[iterrev]
759 t = chaininfocache[iterrev]
756 clen += t[0]
760 clen += t[0]
757 compresseddeltalen += t[1]
761 compresseddeltalen += t[1]
758 break
762 break
759 e = index[iterrev]
763 e = index[iterrev]
760 else:
764 else:
761 # Add text length of base since decompressing that also takes
765 # Add text length of base since decompressing that also takes
762 # work. For cache hits the length is already included.
766 # work. For cache hits the length is already included.
763 compresseddeltalen += e[1]
767 compresseddeltalen += e[1]
764 r = (clen, compresseddeltalen)
768 r = (clen, compresseddeltalen)
765 chaininfocache[rev] = r
769 chaininfocache[rev] = r
766 return r
770 return r
767
771
768 def _deltachain(self, rev, stoprev=None):
772 def _deltachain(self, rev, stoprev=None):
769 """Obtain the delta chain for a revision.
773 """Obtain the delta chain for a revision.
770
774
771 ``stoprev`` specifies a revision to stop at. If not specified, we
775 ``stoprev`` specifies a revision to stop at. If not specified, we
772 stop at the base of the chain.
776 stop at the base of the chain.
773
777
774 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
778 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
775 revs in ascending order and ``stopped`` is a bool indicating whether
779 revs in ascending order and ``stopped`` is a bool indicating whether
776 ``stoprev`` was hit.
780 ``stoprev`` was hit.
777 """
781 """
778 # Try C implementation.
782 # Try C implementation.
779 try:
783 try:
780 return self.index.deltachain(rev, stoprev, self._generaldelta)
784 return self.index.deltachain(rev, stoprev, self._generaldelta)
781 except AttributeError:
785 except AttributeError:
782 pass
786 pass
783
787
784 chain = []
788 chain = []
785
789
786 # Alias to prevent attribute lookup in tight loop.
790 # Alias to prevent attribute lookup in tight loop.
787 index = self.index
791 index = self.index
788 generaldelta = self._generaldelta
792 generaldelta = self._generaldelta
789
793
790 iterrev = rev
794 iterrev = rev
791 e = index[iterrev]
795 e = index[iterrev]
792 while iterrev != e[3] and iterrev != stoprev:
796 while iterrev != e[3] and iterrev != stoprev:
793 chain.append(iterrev)
797 chain.append(iterrev)
794 if generaldelta:
798 if generaldelta:
795 iterrev = e[3]
799 iterrev = e[3]
796 else:
800 else:
797 iterrev -= 1
801 iterrev -= 1
798 e = index[iterrev]
802 e = index[iterrev]
799
803
800 if iterrev == stoprev:
804 if iterrev == stoprev:
801 stopped = True
805 stopped = True
802 else:
806 else:
803 chain.append(iterrev)
807 chain.append(iterrev)
804 stopped = False
808 stopped = False
805
809
806 chain.reverse()
810 chain.reverse()
807 return chain, stopped
811 return chain, stopped
808
812
809 def ancestors(self, revs, stoprev=0, inclusive=False):
813 def ancestors(self, revs, stoprev=0, inclusive=False):
810 """Generate the ancestors of 'revs' in reverse revision order.
814 """Generate the ancestors of 'revs' in reverse revision order.
811 Does not generate revs lower than stoprev.
815 Does not generate revs lower than stoprev.
812
816
813 See the documentation for ancestor.lazyancestors for more details."""
817 See the documentation for ancestor.lazyancestors for more details."""
814
818
815 # first, make sure start revisions aren't filtered
819 # first, make sure start revisions aren't filtered
816 revs = list(revs)
820 revs = list(revs)
817 checkrev = self.node
821 checkrev = self.node
818 for r in revs:
822 for r in revs:
819 checkrev(r)
823 checkrev(r)
820 # and we're sure ancestors aren't filtered as well
824 # and we're sure ancestors aren't filtered as well
821
825
822 if rustext is not None:
826 if rustext is not None:
823 lazyancestors = rustext.ancestor.LazyAncestors
827 lazyancestors = rustext.ancestor.LazyAncestors
824 arg = self.index
828 arg = self.index
825 elif util.safehasattr(parsers, 'rustlazyancestors'):
829 elif util.safehasattr(parsers, 'rustlazyancestors'):
826 lazyancestors = ancestor.rustlazyancestors
830 lazyancestors = ancestor.rustlazyancestors
827 arg = self.index
831 arg = self.index
828 else:
832 else:
829 lazyancestors = ancestor.lazyancestors
833 lazyancestors = ancestor.lazyancestors
830 arg = self._uncheckedparentrevs
834 arg = self._uncheckedparentrevs
831 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
835 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
832
836
833 def descendants(self, revs):
837 def descendants(self, revs):
834 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
838 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
835
839
836 def findcommonmissing(self, common=None, heads=None):
840 def findcommonmissing(self, common=None, heads=None):
837 """Return a tuple of the ancestors of common and the ancestors of heads
841 """Return a tuple of the ancestors of common and the ancestors of heads
838 that are not ancestors of common. In revset terminology, we return the
842 that are not ancestors of common. In revset terminology, we return the
839 tuple:
843 tuple:
840
844
841 ::common, (::heads) - (::common)
845 ::common, (::heads) - (::common)
842
846
843 The list is sorted by revision number, meaning it is
847 The list is sorted by revision number, meaning it is
844 topologically sorted.
848 topologically sorted.
845
849
846 'heads' and 'common' are both lists of node IDs. If heads is
850 'heads' and 'common' are both lists of node IDs. If heads is
847 not supplied, uses all of the revlog's heads. If common is not
851 not supplied, uses all of the revlog's heads. If common is not
848 supplied, uses nullid."""
852 supplied, uses nullid."""
849 if common is None:
853 if common is None:
850 common = [nullid]
854 common = [nullid]
851 if heads is None:
855 if heads is None:
852 heads = self.heads()
856 heads = self.heads()
853
857
854 common = [self.rev(n) for n in common]
858 common = [self.rev(n) for n in common]
855 heads = [self.rev(n) for n in heads]
859 heads = [self.rev(n) for n in heads]
856
860
857 # we want the ancestors, but inclusive
861 # we want the ancestors, but inclusive
858 class lazyset(object):
862 class lazyset(object):
859 def __init__(self, lazyvalues):
863 def __init__(self, lazyvalues):
860 self.addedvalues = set()
864 self.addedvalues = set()
861 self.lazyvalues = lazyvalues
865 self.lazyvalues = lazyvalues
862
866
863 def __contains__(self, value):
867 def __contains__(self, value):
864 return value in self.addedvalues or value in self.lazyvalues
868 return value in self.addedvalues or value in self.lazyvalues
865
869
866 def __iter__(self):
870 def __iter__(self):
867 added = self.addedvalues
871 added = self.addedvalues
868 for r in added:
872 for r in added:
869 yield r
873 yield r
870 for r in self.lazyvalues:
874 for r in self.lazyvalues:
871 if not r in added:
875 if not r in added:
872 yield r
876 yield r
873
877
874 def add(self, value):
878 def add(self, value):
875 self.addedvalues.add(value)
879 self.addedvalues.add(value)
876
880
877 def update(self, values):
881 def update(self, values):
878 self.addedvalues.update(values)
882 self.addedvalues.update(values)
879
883
880 has = lazyset(self.ancestors(common))
884 has = lazyset(self.ancestors(common))
881 has.add(nullrev)
885 has.add(nullrev)
882 has.update(common)
886 has.update(common)
883
887
884 # take all ancestors from heads that aren't in has
888 # take all ancestors from heads that aren't in has
885 missing = set()
889 missing = set()
886 visit = collections.deque(r for r in heads if r not in has)
890 visit = collections.deque(r for r in heads if r not in has)
887 while visit:
891 while visit:
888 r = visit.popleft()
892 r = visit.popleft()
889 if r in missing:
893 if r in missing:
890 continue
894 continue
891 else:
895 else:
892 missing.add(r)
896 missing.add(r)
893 for p in self.parentrevs(r):
897 for p in self.parentrevs(r):
894 if p not in has:
898 if p not in has:
895 visit.append(p)
899 visit.append(p)
896 missing = list(missing)
900 missing = list(missing)
897 missing.sort()
901 missing.sort()
898 return has, [self.node(miss) for miss in missing]
902 return has, [self.node(miss) for miss in missing]
899
903
900 def incrementalmissingrevs(self, common=None):
904 def incrementalmissingrevs(self, common=None):
901 """Return an object that can be used to incrementally compute the
905 """Return an object that can be used to incrementally compute the
902 revision numbers of the ancestors of arbitrary sets that are not
906 revision numbers of the ancestors of arbitrary sets that are not
903 ancestors of common. This is an ancestor.incrementalmissingancestors
907 ancestors of common. This is an ancestor.incrementalmissingancestors
904 object.
908 object.
905
909
906 'common' is a list of revision numbers. If common is not supplied, uses
910 'common' is a list of revision numbers. If common is not supplied, uses
907 nullrev.
911 nullrev.
908 """
912 """
909 if common is None:
913 if common is None:
910 common = [nullrev]
914 common = [nullrev]
911
915
912 if rustext is not None:
916 if rustext is not None:
913 return rustext.ancestor.MissingAncestors(self.index, common)
917 return rustext.ancestor.MissingAncestors(self.index, common)
914 return ancestor.incrementalmissingancestors(self.parentrevs, common)
918 return ancestor.incrementalmissingancestors(self.parentrevs, common)
915
919
916 def findmissingrevs(self, common=None, heads=None):
920 def findmissingrevs(self, common=None, heads=None):
917 """Return the revision numbers of the ancestors of heads that
921 """Return the revision numbers of the ancestors of heads that
918 are not ancestors of common.
922 are not ancestors of common.
919
923
920 More specifically, return a list of revision numbers corresponding to
924 More specifically, return a list of revision numbers corresponding to
921 nodes N such that every N satisfies the following constraints:
925 nodes N such that every N satisfies the following constraints:
922
926
923 1. N is an ancestor of some node in 'heads'
927 1. N is an ancestor of some node in 'heads'
924 2. N is not an ancestor of any node in 'common'
928 2. N is not an ancestor of any node in 'common'
925
929
926 The list is sorted by revision number, meaning it is
930 The list is sorted by revision number, meaning it is
927 topologically sorted.
931 topologically sorted.
928
932
929 'heads' and 'common' are both lists of revision numbers. If heads is
933 'heads' and 'common' are both lists of revision numbers. If heads is
930 not supplied, uses all of the revlog's heads. If common is not
934 not supplied, uses all of the revlog's heads. If common is not
931 supplied, uses nullid."""
935 supplied, uses nullid."""
932 if common is None:
936 if common is None:
933 common = [nullrev]
937 common = [nullrev]
934 if heads is None:
938 if heads is None:
935 heads = self.headrevs()
939 heads = self.headrevs()
936
940
937 inc = self.incrementalmissingrevs(common=common)
941 inc = self.incrementalmissingrevs(common=common)
938 return inc.missingancestors(heads)
942 return inc.missingancestors(heads)
939
943
940 def findmissing(self, common=None, heads=None):
944 def findmissing(self, common=None, heads=None):
941 """Return the ancestors of heads that are not ancestors of common.
945 """Return the ancestors of heads that are not ancestors of common.
942
946
943 More specifically, return a list of nodes N such that every N
947 More specifically, return a list of nodes N such that every N
944 satisfies the following constraints:
948 satisfies the following constraints:
945
949
946 1. N is an ancestor of some node in 'heads'
950 1. N is an ancestor of some node in 'heads'
947 2. N is not an ancestor of any node in 'common'
951 2. N is not an ancestor of any node in 'common'
948
952
949 The list is sorted by revision number, meaning it is
953 The list is sorted by revision number, meaning it is
950 topologically sorted.
954 topologically sorted.
951
955
952 'heads' and 'common' are both lists of node IDs. If heads is
956 'heads' and 'common' are both lists of node IDs. If heads is
953 not supplied, uses all of the revlog's heads. If common is not
957 not supplied, uses all of the revlog's heads. If common is not
954 supplied, uses nullid."""
958 supplied, uses nullid."""
955 if common is None:
959 if common is None:
956 common = [nullid]
960 common = [nullid]
957 if heads is None:
961 if heads is None:
958 heads = self.heads()
962 heads = self.heads()
959
963
960 common = [self.rev(n) for n in common]
964 common = [self.rev(n) for n in common]
961 heads = [self.rev(n) for n in heads]
965 heads = [self.rev(n) for n in heads]
962
966
963 inc = self.incrementalmissingrevs(common=common)
967 inc = self.incrementalmissingrevs(common=common)
964 return [self.node(r) for r in inc.missingancestors(heads)]
968 return [self.node(r) for r in inc.missingancestors(heads)]
965
969
966 def nodesbetween(self, roots=None, heads=None):
970 def nodesbetween(self, roots=None, heads=None):
967 """Return a topological path from 'roots' to 'heads'.
971 """Return a topological path from 'roots' to 'heads'.
968
972
969 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
973 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
970 topologically sorted list of all nodes N that satisfy both of
974 topologically sorted list of all nodes N that satisfy both of
971 these constraints:
975 these constraints:
972
976
973 1. N is a descendant of some node in 'roots'
977 1. N is a descendant of some node in 'roots'
974 2. N is an ancestor of some node in 'heads'
978 2. N is an ancestor of some node in 'heads'
975
979
976 Every node is considered to be both a descendant and an ancestor
980 Every node is considered to be both a descendant and an ancestor
977 of itself, so every reachable node in 'roots' and 'heads' will be
981 of itself, so every reachable node in 'roots' and 'heads' will be
978 included in 'nodes'.
982 included in 'nodes'.
979
983
980 'outroots' is the list of reachable nodes in 'roots', i.e., the
984 'outroots' is the list of reachable nodes in 'roots', i.e., the
981 subset of 'roots' that is returned in 'nodes'. Likewise,
985 subset of 'roots' that is returned in 'nodes'. Likewise,
982 'outheads' is the subset of 'heads' that is also in 'nodes'.
986 'outheads' is the subset of 'heads' that is also in 'nodes'.
983
987
984 'roots' and 'heads' are both lists of node IDs. If 'roots' is
988 'roots' and 'heads' are both lists of node IDs. If 'roots' is
985 unspecified, uses nullid as the only root. If 'heads' is
989 unspecified, uses nullid as the only root. If 'heads' is
986 unspecified, uses list of all of the revlog's heads."""
990 unspecified, uses list of all of the revlog's heads."""
987 nonodes = ([], [], [])
991 nonodes = ([], [], [])
988 if roots is not None:
992 if roots is not None:
989 roots = list(roots)
993 roots = list(roots)
990 if not roots:
994 if not roots:
991 return nonodes
995 return nonodes
992 lowestrev = min([self.rev(n) for n in roots])
996 lowestrev = min([self.rev(n) for n in roots])
993 else:
997 else:
994 roots = [nullid] # Everybody's a descendant of nullid
998 roots = [nullid] # Everybody's a descendant of nullid
995 lowestrev = nullrev
999 lowestrev = nullrev
996 if (lowestrev == nullrev) and (heads is None):
1000 if (lowestrev == nullrev) and (heads is None):
997 # We want _all_ the nodes!
1001 # We want _all_ the nodes!
998 return ([self.node(r) for r in self], [nullid], list(self.heads()))
1002 return ([self.node(r) for r in self], [nullid], list(self.heads()))
999 if heads is None:
1003 if heads is None:
1000 # All nodes are ancestors, so the latest ancestor is the last
1004 # All nodes are ancestors, so the latest ancestor is the last
1001 # node.
1005 # node.
1002 highestrev = len(self) - 1
1006 highestrev = len(self) - 1
1003 # Set ancestors to None to signal that every node is an ancestor.
1007 # Set ancestors to None to signal that every node is an ancestor.
1004 ancestors = None
1008 ancestors = None
1005 # Set heads to an empty dictionary for later discovery of heads
1009 # Set heads to an empty dictionary for later discovery of heads
1006 heads = {}
1010 heads = {}
1007 else:
1011 else:
1008 heads = list(heads)
1012 heads = list(heads)
1009 if not heads:
1013 if not heads:
1010 return nonodes
1014 return nonodes
1011 ancestors = set()
1015 ancestors = set()
1012 # Turn heads into a dictionary so we can remove 'fake' heads.
1016 # Turn heads into a dictionary so we can remove 'fake' heads.
1013 # Also, later we will be using it to filter out the heads we can't
1017 # Also, later we will be using it to filter out the heads we can't
1014 # find from roots.
1018 # find from roots.
1015 heads = dict.fromkeys(heads, False)
1019 heads = dict.fromkeys(heads, False)
1016 # Start at the top and keep marking parents until we're done.
1020 # Start at the top and keep marking parents until we're done.
1017 nodestotag = set(heads)
1021 nodestotag = set(heads)
1018 # Remember where the top was so we can use it as a limit later.
1022 # Remember where the top was so we can use it as a limit later.
1019 highestrev = max([self.rev(n) for n in nodestotag])
1023 highestrev = max([self.rev(n) for n in nodestotag])
1020 while nodestotag:
1024 while nodestotag:
1021 # grab a node to tag
1025 # grab a node to tag
1022 n = nodestotag.pop()
1026 n = nodestotag.pop()
1023 # Never tag nullid
1027 # Never tag nullid
1024 if n == nullid:
1028 if n == nullid:
1025 continue
1029 continue
1026 # A node's revision number represents its place in a
1030 # A node's revision number represents its place in a
1027 # topologically sorted list of nodes.
1031 # topologically sorted list of nodes.
1028 r = self.rev(n)
1032 r = self.rev(n)
1029 if r >= lowestrev:
1033 if r >= lowestrev:
1030 if n not in ancestors:
1034 if n not in ancestors:
1031 # If we are possibly a descendant of one of the roots
1035 # If we are possibly a descendant of one of the roots
1032 # and we haven't already been marked as an ancestor
1036 # and we haven't already been marked as an ancestor
1033 ancestors.add(n) # Mark as ancestor
1037 ancestors.add(n) # Mark as ancestor
1034 # Add non-nullid parents to list of nodes to tag.
1038 # Add non-nullid parents to list of nodes to tag.
1035 nodestotag.update([p for p in self.parents(n) if
1039 nodestotag.update([p for p in self.parents(n) if
1036 p != nullid])
1040 p != nullid])
1037 elif n in heads: # We've seen it before, is it a fake head?
1041 elif n in heads: # We've seen it before, is it a fake head?
1038 # So it is, real heads should not be the ancestors of
1042 # So it is, real heads should not be the ancestors of
1039 # any other heads.
1043 # any other heads.
1040 heads.pop(n)
1044 heads.pop(n)
1041 if not ancestors:
1045 if not ancestors:
1042 return nonodes
1046 return nonodes
1043 # Now that we have our set of ancestors, we want to remove any
1047 # Now that we have our set of ancestors, we want to remove any
1044 # roots that are not ancestors.
1048 # roots that are not ancestors.
1045
1049
1046 # If one of the roots was nullid, everything is included anyway.
1050 # If one of the roots was nullid, everything is included anyway.
1047 if lowestrev > nullrev:
1051 if lowestrev > nullrev:
1048 # But, since we weren't, let's recompute the lowest rev to not
1052 # But, since we weren't, let's recompute the lowest rev to not
1049 # include roots that aren't ancestors.
1053 # include roots that aren't ancestors.
1050
1054
1051 # Filter out roots that aren't ancestors of heads
1055 # Filter out roots that aren't ancestors of heads
1052 roots = [root for root in roots if root in ancestors]
1056 roots = [root for root in roots if root in ancestors]
1053 # Recompute the lowest revision
1057 # Recompute the lowest revision
1054 if roots:
1058 if roots:
1055 lowestrev = min([self.rev(root) for root in roots])
1059 lowestrev = min([self.rev(root) for root in roots])
1056 else:
1060 else:
1057 # No more roots? Return empty list
1061 # No more roots? Return empty list
1058 return nonodes
1062 return nonodes
1059 else:
1063 else:
1060 # We are descending from nullid, and don't need to care about
1064 # We are descending from nullid, and don't need to care about
1061 # any other roots.
1065 # any other roots.
1062 lowestrev = nullrev
1066 lowestrev = nullrev
1063 roots = [nullid]
1067 roots = [nullid]
1064 # Transform our roots list into a set.
1068 # Transform our roots list into a set.
1065 descendants = set(roots)
1069 descendants = set(roots)
1066 # Also, keep the original roots so we can filter out roots that aren't
1070 # Also, keep the original roots so we can filter out roots that aren't
1067 # 'real' roots (i.e. are descended from other roots).
1071 # 'real' roots (i.e. are descended from other roots).
1068 roots = descendants.copy()
1072 roots = descendants.copy()
1069 # Our topologically sorted list of output nodes.
1073 # Our topologically sorted list of output nodes.
1070 orderedout = []
1074 orderedout = []
1071 # Don't start at nullid since we don't want nullid in our output list,
1075 # Don't start at nullid since we don't want nullid in our output list,
1072 # and if nullid shows up in descendants, empty parents will look like
1076 # and if nullid shows up in descendants, empty parents will look like
1073 # they're descendants.
1077 # they're descendants.
1074 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1078 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1075 n = self.node(r)
1079 n = self.node(r)
1076 isdescendant = False
1080 isdescendant = False
1077 if lowestrev == nullrev: # Everybody is a descendant of nullid
1081 if lowestrev == nullrev: # Everybody is a descendant of nullid
1078 isdescendant = True
1082 isdescendant = True
1079 elif n in descendants:
1083 elif n in descendants:
1080 # n is already a descendant
1084 # n is already a descendant
1081 isdescendant = True
1085 isdescendant = True
1082 # This check only needs to be done here because all the roots
1086 # This check only needs to be done here because all the roots
1083 # will start being marked is descendants before the loop.
1087 # will start being marked is descendants before the loop.
1084 if n in roots:
1088 if n in roots:
1085 # If n was a root, check if it's a 'real' root.
1089 # If n was a root, check if it's a 'real' root.
1086 p = tuple(self.parents(n))
1090 p = tuple(self.parents(n))
1087 # If any of its parents are descendants, it's not a root.
1091 # If any of its parents are descendants, it's not a root.
1088 if (p[0] in descendants) or (p[1] in descendants):
1092 if (p[0] in descendants) or (p[1] in descendants):
1089 roots.remove(n)
1093 roots.remove(n)
1090 else:
1094 else:
1091 p = tuple(self.parents(n))
1095 p = tuple(self.parents(n))
1092 # A node is a descendant if either of its parents are
1096 # A node is a descendant if either of its parents are
1093 # descendants. (We seeded the dependents list with the roots
1097 # descendants. (We seeded the dependents list with the roots
1094 # up there, remember?)
1098 # up there, remember?)
1095 if (p[0] in descendants) or (p[1] in descendants):
1099 if (p[0] in descendants) or (p[1] in descendants):
1096 descendants.add(n)
1100 descendants.add(n)
1097 isdescendant = True
1101 isdescendant = True
1098 if isdescendant and ((ancestors is None) or (n in ancestors)):
1102 if isdescendant and ((ancestors is None) or (n in ancestors)):
1099 # Only include nodes that are both descendants and ancestors.
1103 # Only include nodes that are both descendants and ancestors.
1100 orderedout.append(n)
1104 orderedout.append(n)
1101 if (ancestors is not None) and (n in heads):
1105 if (ancestors is not None) and (n in heads):
1102 # We're trying to figure out which heads are reachable
1106 # We're trying to figure out which heads are reachable
1103 # from roots.
1107 # from roots.
1104 # Mark this head as having been reached
1108 # Mark this head as having been reached
1105 heads[n] = True
1109 heads[n] = True
1106 elif ancestors is None:
1110 elif ancestors is None:
1107 # Otherwise, we're trying to discover the heads.
1111 # Otherwise, we're trying to discover the heads.
1108 # Assume this is a head because if it isn't, the next step
1112 # Assume this is a head because if it isn't, the next step
1109 # will eventually remove it.
1113 # will eventually remove it.
1110 heads[n] = True
1114 heads[n] = True
1111 # But, obviously its parents aren't.
1115 # But, obviously its parents aren't.
1112 for p in self.parents(n):
1116 for p in self.parents(n):
1113 heads.pop(p, None)
1117 heads.pop(p, None)
1114 heads = [head for head, flag in heads.iteritems() if flag]
1118 heads = [head for head, flag in heads.iteritems() if flag]
1115 roots = list(roots)
1119 roots = list(roots)
1116 assert orderedout
1120 assert orderedout
1117 assert roots
1121 assert roots
1118 assert heads
1122 assert heads
1119 return (orderedout, roots, heads)
1123 return (orderedout, roots, heads)
1120
1124
1121 def headrevs(self, revs=None):
1125 def headrevs(self, revs=None):
1122 if revs is None:
1126 if revs is None:
1123 try:
1127 try:
1124 return self.index.headrevs()
1128 return self.index.headrevs()
1125 except AttributeError:
1129 except AttributeError:
1126 return self._headrevs()
1130 return self._headrevs()
1127 if rustext is not None:
1131 if rustext is not None:
1128 return rustext.dagop.headrevs(self.index, revs)
1132 return rustext.dagop.headrevs(self.index, revs)
1129 return dagop.headrevs(revs, self._uncheckedparentrevs)
1133 return dagop.headrevs(revs, self._uncheckedparentrevs)
1130
1134
1131 def computephases(self, roots):
1135 def computephases(self, roots):
1132 return self.index.computephasesmapsets(roots)
1136 return self.index.computephasesmapsets(roots)
1133
1137
1134 def _headrevs(self):
1138 def _headrevs(self):
1135 count = len(self)
1139 count = len(self)
1136 if not count:
1140 if not count:
1137 return [nullrev]
1141 return [nullrev]
1138 # we won't iter over filtered rev so nobody is a head at start
1142 # we won't iter over filtered rev so nobody is a head at start
1139 ishead = [0] * (count + 1)
1143 ishead = [0] * (count + 1)
1140 index = self.index
1144 index = self.index
1141 for r in self:
1145 for r in self:
1142 ishead[r] = 1 # I may be an head
1146 ishead[r] = 1 # I may be an head
1143 e = index[r]
1147 e = index[r]
1144 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1148 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1145 return [r for r, val in enumerate(ishead) if val]
1149 return [r for r, val in enumerate(ishead) if val]
1146
1150
1147 def heads(self, start=None, stop=None):
1151 def heads(self, start=None, stop=None):
1148 """return the list of all nodes that have no children
1152 """return the list of all nodes that have no children
1149
1153
1150 if start is specified, only heads that are descendants of
1154 if start is specified, only heads that are descendants of
1151 start will be returned
1155 start will be returned
1152 if stop is specified, it will consider all the revs from stop
1156 if stop is specified, it will consider all the revs from stop
1153 as if they had no children
1157 as if they had no children
1154 """
1158 """
1155 if start is None and stop is None:
1159 if start is None and stop is None:
1156 if not len(self):
1160 if not len(self):
1157 return [nullid]
1161 return [nullid]
1158 return [self.node(r) for r in self.headrevs()]
1162 return [self.node(r) for r in self.headrevs()]
1159
1163
1160 if start is None:
1164 if start is None:
1161 start = nullrev
1165 start = nullrev
1162 else:
1166 else:
1163 start = self.rev(start)
1167 start = self.rev(start)
1164
1168
1165 stoprevs = set(self.rev(n) for n in stop or [])
1169 stoprevs = set(self.rev(n) for n in stop or [])
1166
1170
1167 revs = dagop.headrevssubset(self.revs, self.parentrevs, startrev=start,
1171 revs = dagop.headrevssubset(self.revs, self.parentrevs, startrev=start,
1168 stoprevs=stoprevs)
1172 stoprevs=stoprevs)
1169
1173
1170 return [self.node(rev) for rev in revs]
1174 return [self.node(rev) for rev in revs]
1171
1175
1172 def children(self, node):
1176 def children(self, node):
1173 """find the children of a given node"""
1177 """find the children of a given node"""
1174 c = []
1178 c = []
1175 p = self.rev(node)
1179 p = self.rev(node)
1176 for r in self.revs(start=p + 1):
1180 for r in self.revs(start=p + 1):
1177 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1181 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1178 if prevs:
1182 if prevs:
1179 for pr in prevs:
1183 for pr in prevs:
1180 if pr == p:
1184 if pr == p:
1181 c.append(self.node(r))
1185 c.append(self.node(r))
1182 elif p == nullrev:
1186 elif p == nullrev:
1183 c.append(self.node(r))
1187 c.append(self.node(r))
1184 return c
1188 return c
1185
1189
1186 def commonancestorsheads(self, a, b):
1190 def commonancestorsheads(self, a, b):
1187 """calculate all the heads of the common ancestors of nodes a and b"""
1191 """calculate all the heads of the common ancestors of nodes a and b"""
1188 a, b = self.rev(a), self.rev(b)
1192 a, b = self.rev(a), self.rev(b)
1189 ancs = self._commonancestorsheads(a, b)
1193 ancs = self._commonancestorsheads(a, b)
1190 return pycompat.maplist(self.node, ancs)
1194 return pycompat.maplist(self.node, ancs)
1191
1195
1192 def _commonancestorsheads(self, *revs):
1196 def _commonancestorsheads(self, *revs):
1193 """calculate all the heads of the common ancestors of revs"""
1197 """calculate all the heads of the common ancestors of revs"""
1194 try:
1198 try:
1195 ancs = self.index.commonancestorsheads(*revs)
1199 ancs = self.index.commonancestorsheads(*revs)
1196 except (AttributeError, OverflowError): # C implementation failed
1200 except (AttributeError, OverflowError): # C implementation failed
1197 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1201 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1198 return ancs
1202 return ancs
1199
1203
1200 def isancestor(self, a, b):
1204 def isancestor(self, a, b):
1201 """return True if node a is an ancestor of node b
1205 """return True if node a is an ancestor of node b
1202
1206
1203 A revision is considered an ancestor of itself."""
1207 A revision is considered an ancestor of itself."""
1204 a, b = self.rev(a), self.rev(b)
1208 a, b = self.rev(a), self.rev(b)
1205 return self.isancestorrev(a, b)
1209 return self.isancestorrev(a, b)
1206
1210
1207 def isancestorrev(self, a, b):
1211 def isancestorrev(self, a, b):
1208 """return True if revision a is an ancestor of revision b
1212 """return True if revision a is an ancestor of revision b
1209
1213
1210 A revision is considered an ancestor of itself.
1214 A revision is considered an ancestor of itself.
1211
1215
1212 The implementation of this is trivial but the use of
1216 The implementation of this is trivial but the use of
1213 commonancestorsheads is not."""
1217 commonancestorsheads is not."""
1214 if a == nullrev:
1218 if a == nullrev:
1215 return True
1219 return True
1216 elif a == b:
1220 elif a == b:
1217 return True
1221 return True
1218 elif a > b:
1222 elif a > b:
1219 return False
1223 return False
1220 return a in self._commonancestorsheads(a, b)
1224 return a in self._commonancestorsheads(a, b)
1221
1225
1222 def ancestor(self, a, b):
1226 def ancestor(self, a, b):
1223 """calculate the "best" common ancestor of nodes a and b"""
1227 """calculate the "best" common ancestor of nodes a and b"""
1224
1228
1225 a, b = self.rev(a), self.rev(b)
1229 a, b = self.rev(a), self.rev(b)
1226 try:
1230 try:
1227 ancs = self.index.ancestors(a, b)
1231 ancs = self.index.ancestors(a, b)
1228 except (AttributeError, OverflowError):
1232 except (AttributeError, OverflowError):
1229 ancs = ancestor.ancestors(self.parentrevs, a, b)
1233 ancs = ancestor.ancestors(self.parentrevs, a, b)
1230 if ancs:
1234 if ancs:
1231 # choose a consistent winner when there's a tie
1235 # choose a consistent winner when there's a tie
1232 return min(map(self.node, ancs))
1236 return min(map(self.node, ancs))
1233 return nullid
1237 return nullid
1234
1238
    def _match(self, id):
        # Resolve ``id`` to a binary node by trying, in order: an integer
        # revision, a 20-byte binary node, a stringified revision number,
        # and a 40-character hex node. Returns None when nothing matches.
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except error.LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            # reject forms like "07" or "1.0" that int() would accept
            if "%d" % rev != id:
                raise ValueError
            if rev < 0:
                # negative revs count back from the end
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, error.LookupError):
                pass
1268
1272
    def _partialmatch(self, id):
        # Resolve a (possibly partial) hex node prefix to a binary node.
        # Fast path uses the C radix tree; the slow path scans the index
        # and honors filtered (hidden) revisions via self.hasnode().
        # we don't care wdirfilenodeids as they should be always full hash
        maybewdir = wdirhex.startswith(id)
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise error.RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise error.AmbiguousPrefixLookupError(
                    id, self.indexfile, _('ambiguous identifier'))
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        # consult the prefix-match cache before scanning the whole index
        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                # re-check against the full (odd-length) hex prefix and
                # drop nodes hidden by filtering
                nl = [n for n in nl if hex(n).startswith(id) and
                      self.hasnode(n)]
                if nullhex.startswith(id):
                    nl.append(nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.indexfile, _('ambiguous identifier'))
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except TypeError:
                # bin() failed: id was not valid hex
                pass
1318
1322
1319 def lookup(self, id):
1323 def lookup(self, id):
1320 """locate a node based on:
1324 """locate a node based on:
1321 - revision number or str(revision number)
1325 - revision number or str(revision number)
1322 - nodeid or subset of hex nodeid
1326 - nodeid or subset of hex nodeid
1323 """
1327 """
1324 n = self._match(id)
1328 n = self._match(id)
1325 if n is not None:
1329 if n is not None:
1326 return n
1330 return n
1327 n = self._partialmatch(id)
1331 n = self._partialmatch(id)
1328 if n:
1332 if n:
1329 return n
1333 return n
1330
1334
1331 raise error.LookupError(id, self.indexfile, _('no match found'))
1335 raise error.LookupError(id, self.indexfile, _('no match found'))
1332
1336
    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""
        def isvalid(prefix):
            # True if ``prefix`` resolves to exactly one node (or wdir)
            try:
                node = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if node is None:
                raise error.LookupError(node, self.indexfile, _('no node'))
            return True

        def maybewdir(prefix):
            # a prefix of all 'f's could also denote the working directory
            return all(c == 'f' for c in pycompat.iterbytestr(prefix))

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        # fast path: unfiltered revlog with a C index that can compute
        # the shortest unambiguous length directly
        if not getattr(self, 'filteredrevs', None):
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != wdirid:
                    raise error.LookupError(node, self.indexfile, _('no node'))
            except AttributeError:
                # Fall through to pure code
                pass

        if node == wdirid:
            # wdir only needs to be unambiguous, not disambiguated further
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        for length in range(minlength, 41):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)
1380
1384
1381 def cmp(self, node, text):
1385 def cmp(self, node, text):
1382 """compare text with a given file revision
1386 """compare text with a given file revision
1383
1387
1384 returns True if text is different than what is stored.
1388 returns True if text is different than what is stored.
1385 """
1389 """
1386 p1, p2 = self.parents(node)
1390 p1, p2 = self.parents(node)
1387 return storageutil.hashrevisionsha1(text, p1, p2) != node
1391 return storageutil.hashrevisionsha1(text, p1, p2) != node
1388
1392
1389 def _cachesegment(self, offset, data):
1393 def _cachesegment(self, offset, data):
1390 """Add a segment to the revlog cache.
1394 """Add a segment to the revlog cache.
1391
1395
1392 Accepts an absolute offset and the data that is at that location.
1396 Accepts an absolute offset and the data that is at that location.
1393 """
1397 """
1394 o, d = self._chunkcache
1398 o, d = self._chunkcache
1395 # try to add to existing cache
1399 # try to add to existing cache
1396 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1400 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1397 self._chunkcache = o, d + data
1401 self._chunkcache = o, d + data
1398 else:
1402 else:
1399 self._chunkcache = offset, data
1403 self._chunkcache = offset, data
1400
1404
    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.

        Raises if the requested number of bytes could not be read.
        """
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        # round the window down/up to cachesize boundaries (cachesize is
        # a power of two, so ~(cachesize - 1) masks off the low bits)
        realoffset = offset & ~(cachesize - 1)
        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                      - realoffset)
        with self._datareadfp(df) as df:
            df.seek(realoffset)
            d = df.read(reallength)

        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            # the window was widened: carve the requested span back out
            startoffset = offset - realoffset
            if len(d) - startoffset < length:
                raise error.RevlogError(
                    _('partial read of revlog %s; expected %d bytes from '
                      'offset %d, got %d') %
                    (self.indexfile if self._inline else self.datafile,
                     length, realoffset, len(d) - startoffset))

            return util.buffer(d, startoffset, length)

        if len(d) < length:
            raise error.RevlogError(
                _('partial read of revlog %s; expected %d bytes from offset '
                  '%d, got %d') %
                (self.indexfile if self._inline else self.datafile,
                 length, offset, len(d)))

        return d
1445
1449
1446 def _getsegment(self, offset, length, df=None):
1450 def _getsegment(self, offset, length, df=None):
1447 """Obtain a segment of raw data from the revlog.
1451 """Obtain a segment of raw data from the revlog.
1448
1452
1449 Accepts an absolute offset, length of bytes to obtain, and an
1453 Accepts an absolute offset, length of bytes to obtain, and an
1450 optional file handle to the already-opened revlog. If the file
1454 optional file handle to the already-opened revlog. If the file
1451 handle is used, it's original seek position will not be preserved.
1455 handle is used, it's original seek position will not be preserved.
1452
1456
1453 Requests for data may be returned from a cache.
1457 Requests for data may be returned from a cache.
1454
1458
1455 Returns a str or a buffer instance of raw byte data.
1459 Returns a str or a buffer instance of raw byte data.
1456 """
1460 """
1457 o, d = self._chunkcache
1461 o, d = self._chunkcache
1458 l = len(d)
1462 l = len(d)
1459
1463
1460 # is it in the cache?
1464 # is it in the cache?
1461 cachestart = offset - o
1465 cachestart = offset - o
1462 cacheend = cachestart + length
1466 cacheend = cachestart + length
1463 if cachestart >= 0 and cacheend <= l:
1467 if cachestart >= 0 and cacheend <= l:
1464 if cachestart == 0 and cacheend == l:
1468 if cachestart == 0 and cacheend == l:
1465 return d # avoid a copy
1469 return d # avoid a copy
1466 return util.buffer(d, cachestart, cacheend - cachestart)
1470 return util.buffer(d, cachestart, cacheend - cachestart)
1467
1471
1468 return self._readsegment(offset, length, df=df)
1472 return self._readsegment(offset, length, df=df)
1469
1473
    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        # index entry field 0 packs offset<<16 | flags; shift to get offset
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            # inline revlogs interleave index entries with data, so each
            # revision's data is pushed back by one index-entry size
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start

        return start, self._getsegment(start, length, df=df)
1503
1507
1504 def _chunk(self, rev, df=None):
1508 def _chunk(self, rev, df=None):
1505 """Obtain a single decompressed chunk for a revision.
1509 """Obtain a single decompressed chunk for a revision.
1506
1510
1507 Accepts an integer revision and an optional already-open file handle
1511 Accepts an integer revision and an optional already-open file handle
1508 to be used for reading. If used, the seek position of the file will not
1512 to be used for reading. If used, the seek position of the file will not
1509 be preserved.
1513 be preserved.
1510
1514
1511 Returns a str holding uncompressed data for the requested revision.
1515 Returns a str holding uncompressed data for the requested revision.
1512 """
1516 """
1513 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1517 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1514
1518
1515 def _chunks(self, revs, df=None, targetsize=None):
1519 def _chunks(self, revs, df=None, targetsize=None):
1516 """Obtain decompressed chunks for the specified revisions.
1520 """Obtain decompressed chunks for the specified revisions.
1517
1521
1518 Accepts an iterable of numeric revisions that are assumed to be in
1522 Accepts an iterable of numeric revisions that are assumed to be in
1519 ascending order. Also accepts an optional already-open file handle
1523 ascending order. Also accepts an optional already-open file handle
1520 to be used for reading. If used, the seek position of the file will
1524 to be used for reading. If used, the seek position of the file will
1521 not be preserved.
1525 not be preserved.
1522
1526
1523 This function is similar to calling ``self._chunk()`` multiple times,
1527 This function is similar to calling ``self._chunk()`` multiple times,
1524 but is faster.
1528 but is faster.
1525
1529
1526 Returns a list with decompressed data for each requested revision.
1530 Returns a list with decompressed data for each requested revision.
1527 """
1531 """
1528 if not revs:
1532 if not revs:
1529 return []
1533 return []
1530 start = self.start
1534 start = self.start
1531 length = self.length
1535 length = self.length
1532 inline = self._inline
1536 inline = self._inline
1533 iosize = self._io.size
1537 iosize = self._io.size
1534 buffer = util.buffer
1538 buffer = util.buffer
1535
1539
1536 l = []
1540 l = []
1537 ladd = l.append
1541 ladd = l.append
1538
1542
1539 if not self._withsparseread:
1543 if not self._withsparseread:
1540 slicedchunks = (revs,)
1544 slicedchunks = (revs,)
1541 else:
1545 else:
1542 slicedchunks = deltautil.slicechunk(self, revs,
1546 slicedchunks = deltautil.slicechunk(self, revs,
1543 targetsize=targetsize)
1547 targetsize=targetsize)
1544
1548
1545 for revschunk in slicedchunks:
1549 for revschunk in slicedchunks:
1546 firstrev = revschunk[0]
1550 firstrev = revschunk[0]
1547 # Skip trailing revisions with empty diff
1551 # Skip trailing revisions with empty diff
1548 for lastrev in revschunk[::-1]:
1552 for lastrev in revschunk[::-1]:
1549 if length(lastrev) != 0:
1553 if length(lastrev) != 0:
1550 break
1554 break
1551
1555
1552 try:
1556 try:
1553 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1557 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1554 except OverflowError:
1558 except OverflowError:
1555 # issue4215 - we can't cache a run of chunks greater than
1559 # issue4215 - we can't cache a run of chunks greater than
1556 # 2G on Windows
1560 # 2G on Windows
1557 return [self._chunk(rev, df=df) for rev in revschunk]
1561 return [self._chunk(rev, df=df) for rev in revschunk]
1558
1562
1559 decomp = self.decompress
1563 decomp = self.decompress
1560 for rev in revschunk:
1564 for rev in revschunk:
1561 chunkstart = start(rev)
1565 chunkstart = start(rev)
1562 if inline:
1566 if inline:
1563 chunkstart += (rev + 1) * iosize
1567 chunkstart += (rev + 1) * iosize
1564 chunklength = length(rev)
1568 chunklength = length(rev)
1565 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1569 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1566
1570
1567 return l
1571 return l
1568
1572
1569 def _chunkclear(self):
1573 def _chunkclear(self):
1570 """Clear the raw chunk cache."""
1574 """Clear the raw chunk cache."""
1571 self._chunkcache = (0, '')
1575 self._chunkcache = (0, '')
1572
1576
1573 def deltaparent(self, rev):
1577 def deltaparent(self, rev):
1574 """return deltaparent of the given revision"""
1578 """return deltaparent of the given revision"""
1575 base = self.index[rev][3]
1579 base = self.index[rev][3]
1576 if base == rev:
1580 if base == rev:
1577 return nullrev
1581 return nullrev
1578 elif self._generaldelta:
1582 elif self._generaldelta:
1579 return base
1583 return base
1580 else:
1584 else:
1581 return rev - 1
1585 return rev - 1
1582
1586
1583 def issnapshot(self, rev):
1587 def issnapshot(self, rev):
1584 """tells whether rev is a snapshot
1588 """tells whether rev is a snapshot
1585 """
1589 """
1586 if not self._sparserevlog:
1590 if not self._sparserevlog:
1587 return self.deltaparent(rev) == nullrev
1591 return self.deltaparent(rev) == nullrev
1588 elif util.safehasattr(self.index, 'issnapshot'):
1592 elif util.safehasattr(self.index, 'issnapshot'):
1589 # directly assign the method to cache the testing and access
1593 # directly assign the method to cache the testing and access
1590 self.issnapshot = self.index.issnapshot
1594 self.issnapshot = self.index.issnapshot
1591 return self.issnapshot(rev)
1595 return self.issnapshot(rev)
1592 if rev == nullrev:
1596 if rev == nullrev:
1593 return True
1597 return True
1594 entry = self.index[rev]
1598 entry = self.index[rev]
1595 base = entry[3]
1599 base = entry[3]
1596 if base == rev:
1600 if base == rev:
1597 return True
1601 return True
1598 if base == nullrev:
1602 if base == nullrev:
1599 return True
1603 return True
1600 p1 = entry[5]
1604 p1 = entry[5]
1601 p2 = entry[6]
1605 p2 = entry[6]
1602 if base == p1 or base == p2:
1606 if base == p1 or base == p2:
1603 return False
1607 return False
1604 return self.issnapshot(base)
1608 return self.issnapshot(base)
1605
1609
1606 def snapshotdepth(self, rev):
1610 def snapshotdepth(self, rev):
1607 """number of snapshot in the chain before this one"""
1611 """number of snapshot in the chain before this one"""
1608 if not self.issnapshot(rev):
1612 if not self.issnapshot(rev):
1609 raise error.ProgrammingError('revision %d not a snapshot')
1613 raise error.ProgrammingError('revision %d not a snapshot')
1610 return len(self._deltachain(rev)[0]) - 1
1614 return len(self._deltachain(rev)[0]) - 1
1611
1615
1612 def revdiff(self, rev1, rev2):
1616 def revdiff(self, rev1, rev2):
1613 """return or calculate a delta between two revisions
1617 """return or calculate a delta between two revisions
1614
1618
1615 The delta calculated is in binary form and is intended to be written to
1619 The delta calculated is in binary form and is intended to be written to
1616 revlog data directly. So this function needs raw revision data.
1620 revlog data directly. So this function needs raw revision data.
1617 """
1621 """
1618 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1622 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1619 return bytes(self._chunk(rev2))
1623 return bytes(self._chunk(rev2))
1620
1624
1621 return mdiff.textdiff(self.revision(rev1, raw=True),
1625 return mdiff.textdiff(self.revision(rev1, raw=True),
1622 self.revision(rev2, raw=True))
1626 self.revision(rev2, raw=True))
1623
1627
1624 def revision(self, nodeorrev, _df=None, raw=False):
1628 def revision(self, nodeorrev, _df=None, raw=False):
1625 """return an uncompressed revision of a given node or revision
1629 """return an uncompressed revision of a given node or revision
1626 number.
1630 number.
1627
1631
1628 _df - an existing file handle to read from. (internal-only)
1632 _df - an existing file handle to read from. (internal-only)
1629 raw - an optional argument specifying if the revision data is to be
1633 raw - an optional argument specifying if the revision data is to be
1630 treated as raw data when applying flag transforms. 'raw' should be set
1634 treated as raw data when applying flag transforms. 'raw' should be set
1631 to True when generating changegroups or in debug commands.
1635 to True when generating changegroups or in debug commands.
1632 """
1636 """
1633 if isinstance(nodeorrev, int):
1637 if isinstance(nodeorrev, int):
1634 rev = nodeorrev
1638 rev = nodeorrev
1635 node = self.node(rev)
1639 node = self.node(rev)
1636 else:
1640 else:
1637 node = nodeorrev
1641 node = nodeorrev
1638 rev = None
1642 rev = None
1639
1643
1640 cachedrev = None
1644 cachedrev = None
1641 flags = None
1645 flags = None
1642 rawtext = None
1646 rawtext = None
1643 if node == nullid:
1647 if node == nullid:
1644 return ""
1648 return ""
1645 if self._revisioncache:
1649 if self._revisioncache:
1646 if self._revisioncache[0] == node:
1650 if self._revisioncache[0] == node:
1647 # _cache only stores rawtext
1651 # _cache only stores rawtext
1648 if raw:
1652 if raw:
1649 return self._revisioncache[2]
1653 return self._revisioncache[2]
1650 # duplicated, but good for perf
1654 # duplicated, but good for perf
1651 if rev is None:
1655 if rev is None:
1652 rev = self.rev(node)
1656 rev = self.rev(node)
1653 if flags is None:
1657 if flags is None:
1654 flags = self.flags(rev)
1658 flags = self.flags(rev)
1655 # no extra flags set, no flag processor runs, text = rawtext
1659 # no extra flags set, no flag processor runs, text = rawtext
1656 if flags == REVIDX_DEFAULT_FLAGS:
1660 if flags == REVIDX_DEFAULT_FLAGS:
1657 return self._revisioncache[2]
1661 return self._revisioncache[2]
1658 # rawtext is reusable. need to run flag processor
1662 # rawtext is reusable. need to run flag processor
1659 rawtext = self._revisioncache[2]
1663 rawtext = self._revisioncache[2]
1660
1664
1661 cachedrev = self._revisioncache[1]
1665 cachedrev = self._revisioncache[1]
1662
1666
1663 # look up what we need to read
1667 # look up what we need to read
1664 if rawtext is None:
1668 if rawtext is None:
1665 if rev is None:
1669 if rev is None:
1666 rev = self.rev(node)
1670 rev = self.rev(node)
1667
1671
1668 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1672 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1669 if stopped:
1673 if stopped:
1670 rawtext = self._revisioncache[2]
1674 rawtext = self._revisioncache[2]
1671
1675
1672 # drop cache to save memory
1676 # drop cache to save memory
1673 self._revisioncache = None
1677 self._revisioncache = None
1674
1678
1675 targetsize = None
1679 targetsize = None
1676 rawsize = self.index[rev][2]
1680 rawsize = self.index[rev][2]
1677 if 0 <= rawsize:
1681 if 0 <= rawsize:
1678 targetsize = 4 * rawsize
1682 targetsize = 4 * rawsize
1679
1683
1680 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1684 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1681 if rawtext is None:
1685 if rawtext is None:
1682 rawtext = bytes(bins[0])
1686 rawtext = bytes(bins[0])
1683 bins = bins[1:]
1687 bins = bins[1:]
1684
1688
1685 rawtext = mdiff.patches(rawtext, bins)
1689 rawtext = mdiff.patches(rawtext, bins)
1686 self._revisioncache = (node, rev, rawtext)
1690 self._revisioncache = (node, rev, rawtext)
1687
1691
1688 if flags is None:
1692 if flags is None:
1689 if rev is None:
1693 if rev is None:
1690 rev = self.rev(node)
1694 rev = self.rev(node)
1691 flags = self.flags(rev)
1695 flags = self.flags(rev)
1692
1696
1693 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
1697 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
1694 if validatehash:
1698 if validatehash:
1695 self.checkhash(text, node, rev=rev)
1699 self.checkhash(text, node, rev=rev)
1696
1700
1697 return text
1701 return text
1698
1702
1699 def hash(self, text, p1, p2):
1703 def hash(self, text, p1, p2):
1700 """Compute a node hash.
1704 """Compute a node hash.
1701
1705
1702 Available as a function so that subclasses can replace the hash
1706 Available as a function so that subclasses can replace the hash
1703 as needed.
1707 as needed.
1704 """
1708 """
1705 return storageutil.hashrevisionsha1(text, p1, p2)
1709 return storageutil.hashrevisionsha1(text, p1, p2)
1706
1710
1707 def _processflags(self, text, flags, operation, raw=False):
1711 def _processflags(self, text, flags, operation, raw=False):
1708 """Inspect revision data flags and applies transforms defined by
1712 """Inspect revision data flags and applies transforms defined by
1709 registered flag processors.
1713 registered flag processors.
1710
1714
1711 ``text`` - the revision data to process
1715 ``text`` - the revision data to process
1712 ``flags`` - the revision flags
1716 ``flags`` - the revision flags
1713 ``operation`` - the operation being performed (read or write)
1717 ``operation`` - the operation being performed (read or write)
1714 ``raw`` - an optional argument describing if the raw transform should be
1718 ``raw`` - an optional argument describing if the raw transform should be
1715 applied.
1719 applied.
1716
1720
1717 This method processes the flags in the order (or reverse order if
1721 This method processes the flags in the order (or reverse order if
1718 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
1722 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
1719 flag processors registered for present flags. The order of flags defined
1723 flag processors registered for present flags. The order of flags defined
1720 in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.
1724 in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.
1721
1725
1722 Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
1726 Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
1723 processed text and ``validatehash`` is a bool indicating whether the
1727 processed text and ``validatehash`` is a bool indicating whether the
1724 returned text should be checked for hash integrity.
1728 returned text should be checked for hash integrity.
1725
1729
1726 Note: If the ``raw`` argument is set, it has precedence over the
1730 Note: If the ``raw`` argument is set, it has precedence over the
1727 operation and will only update the value of ``validatehash``.
1731 operation and will only update the value of ``validatehash``.
1728 """
1732 """
1729 # fast path: no flag processors will run
1733 # fast path: no flag processors will run
1730 if flags == 0:
1734 if flags == 0:
1731 return text, True
1735 return text, True
1732 if not operation in ('read', 'write'):
1736 if not operation in ('read', 'write'):
1733 raise error.ProgrammingError(_("invalid '%s' operation") %
1737 raise error.ProgrammingError(_("invalid '%s' operation") %
1734 operation)
1738 operation)
1735 # Check all flags are known.
1739 # Check all flags are known.
1736 if flags & ~REVIDX_KNOWN_FLAGS:
1740 if flags & ~REVIDX_KNOWN_FLAGS:
1737 raise error.RevlogError(_("incompatible revision flag '%#x'") %
1741 raise error.RevlogError(_("incompatible revision flag '%#x'") %
1738 (flags & ~REVIDX_KNOWN_FLAGS))
1742 (flags & ~REVIDX_KNOWN_FLAGS))
1739 validatehash = True
1743 validatehash = True
1740 # Depending on the operation (read or write), the order might be
1744 # Depending on the operation (read or write), the order might be
1741 # reversed due to non-commutative transforms.
1745 # reversed due to non-commutative transforms.
1742 orderedflags = REVIDX_FLAGS_ORDER
1746 orderedflags = REVIDX_FLAGS_ORDER
1743 if operation == 'write':
1747 if operation == 'write':
1744 orderedflags = reversed(orderedflags)
1748 orderedflags = reversed(orderedflags)
1745
1749
1746 for flag in orderedflags:
1750 for flag in orderedflags:
1747 # If a flagprocessor has been registered for a known flag, apply the
1751 # If a flagprocessor has been registered for a known flag, apply the
1748 # related operation transform and update result tuple.
1752 # related operation transform and update result tuple.
1749 if flag & flags:
1753 if flag & flags:
1750 vhash = True
1754 vhash = True
1751
1755
1752 if flag not in self._flagprocessors:
1756 if flag not in self._flagprocessors:
1753 message = _("missing processor for flag '%#x'") % (flag)
1757 message = _("missing processor for flag '%#x'") % (flag)
1754 raise error.RevlogError(message)
1758 raise error.RevlogError(message)
1755
1759
1756 processor = self._flagprocessors[flag]
1760 processor = self._flagprocessors[flag]
1757 if processor is not None:
1761 if processor is not None:
1758 readtransform, writetransform, rawtransform = processor
1762 readtransform, writetransform, rawtransform = processor
1759
1763
1760 if raw:
1764 if raw:
1761 vhash = rawtransform(self, text)
1765 vhash = rawtransform(self, text)
1762 elif operation == 'read':
1766 elif operation == 'read':
1763 text, vhash = readtransform(self, text)
1767 text, vhash = readtransform(self, text)
1764 else: # write operation
1768 else: # write operation
1765 text, vhash = writetransform(self, text)
1769 text, vhash = writetransform(self, text)
1766 validatehash = validatehash and vhash
1770 validatehash = validatehash and vhash
1767
1771
1768 return text, validatehash
1772 return text, validatehash
1769
1773
1770 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1774 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1771 """Check node hash integrity.
1775 """Check node hash integrity.
1772
1776
1773 Available as a function so that subclasses can extend hash mismatch
1777 Available as a function so that subclasses can extend hash mismatch
1774 behaviors as needed.
1778 behaviors as needed.
1775 """
1779 """
1776 try:
1780 try:
1777 if p1 is None and p2 is None:
1781 if p1 is None and p2 is None:
1778 p1, p2 = self.parents(node)
1782 p1, p2 = self.parents(node)
1779 if node != self.hash(text, p1, p2):
1783 if node != self.hash(text, p1, p2):
1780 # Clear the revision cache on hash failure. The revision cache
1784 # Clear the revision cache on hash failure. The revision cache
1781 # only stores the raw revision and clearing the cache does have
1785 # only stores the raw revision and clearing the cache does have
1782 # the side-effect that we won't have a cache hit when the raw
1786 # the side-effect that we won't have a cache hit when the raw
1783 # revision data is accessed. But this case should be rare and
1787 # revision data is accessed. But this case should be rare and
1784 # it is extra work to teach the cache about the hash
1788 # it is extra work to teach the cache about the hash
1785 # verification state.
1789 # verification state.
1786 if self._revisioncache and self._revisioncache[0] == node:
1790 if self._revisioncache and self._revisioncache[0] == node:
1787 self._revisioncache = None
1791 self._revisioncache = None
1788
1792
1789 revornode = rev
1793 revornode = rev
1790 if revornode is None:
1794 if revornode is None:
1791 revornode = templatefilters.short(hex(node))
1795 revornode = templatefilters.short(hex(node))
1792 raise error.RevlogError(_("integrity check failed on %s:%s")
1796 raise error.RevlogError(_("integrity check failed on %s:%s")
1793 % (self.indexfile, pycompat.bytestr(revornode)))
1797 % (self.indexfile, pycompat.bytestr(revornode)))
1794 except error.RevlogError:
1798 except error.RevlogError:
1795 if self._censorable and storageutil.iscensoredtext(text):
1799 if self._censorable and storageutil.iscensoredtext(text):
1796 raise error.CensoredNodeError(self.indexfile, node, text)
1800 raise error.CensoredNodeError(self.indexfile, node, text)
1797 raise
1801 raise
1798
1802
1799 def _enforceinlinesize(self, tr, fp=None):
1803 def _enforceinlinesize(self, tr, fp=None):
1800 """Check if the revlog is too big for inline and convert if so.
1804 """Check if the revlog is too big for inline and convert if so.
1801
1805
1802 This should be called after revisions are added to the revlog. If the
1806 This should be called after revisions are added to the revlog. If the
1803 revlog has grown too large to be an inline revlog, it will convert it
1807 revlog has grown too large to be an inline revlog, it will convert it
1804 to use multiple index and data files.
1808 to use multiple index and data files.
1805 """
1809 """
1806 tiprev = len(self) - 1
1810 tiprev = len(self) - 1
1807 if (not self._inline or
1811 if (not self._inline or
1808 (self.start(tiprev) + self.length(tiprev)) < _maxinline):
1812 (self.start(tiprev) + self.length(tiprev)) < _maxinline):
1809 return
1813 return
1810
1814
1811 trinfo = tr.find(self.indexfile)
1815 trinfo = tr.find(self.indexfile)
1812 if trinfo is None:
1816 if trinfo is None:
1813 raise error.RevlogError(_("%s not found in the transaction")
1817 raise error.RevlogError(_("%s not found in the transaction")
1814 % self.indexfile)
1818 % self.indexfile)
1815
1819
1816 trindex = trinfo[2]
1820 trindex = trinfo[2]
1817 if trindex is not None:
1821 if trindex is not None:
1818 dataoff = self.start(trindex)
1822 dataoff = self.start(trindex)
1819 else:
1823 else:
1820 # revlog was stripped at start of transaction, use all leftover data
1824 # revlog was stripped at start of transaction, use all leftover data
1821 trindex = len(self) - 1
1825 trindex = len(self) - 1
1822 dataoff = self.end(tiprev)
1826 dataoff = self.end(tiprev)
1823
1827
1824 tr.add(self.datafile, dataoff)
1828 tr.add(self.datafile, dataoff)
1825
1829
1826 if fp:
1830 if fp:
1827 fp.flush()
1831 fp.flush()
1828 fp.close()
1832 fp.close()
1829 # We can't use the cached file handle after close(). So prevent
1833 # We can't use the cached file handle after close(). So prevent
1830 # its usage.
1834 # its usage.
1831 self._writinghandles = None
1835 self._writinghandles = None
1832
1836
1833 with self._indexfp('r') as ifh, self._datafp('w') as dfh:
1837 with self._indexfp('r') as ifh, self._datafp('w') as dfh:
1834 for r in self:
1838 for r in self:
1835 dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
1839 dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
1836
1840
1837 with self._indexfp('w') as fp:
1841 with self._indexfp('w') as fp:
1838 self.version &= ~FLAG_INLINE_DATA
1842 self.version &= ~FLAG_INLINE_DATA
1839 self._inline = False
1843 self._inline = False
1840 io = self._io
1844 io = self._io
1841 for i in self:
1845 for i in self:
1842 e = io.packentry(self.index[i], self.node, self.version, i)
1846 e = io.packentry(self.index[i], self.node, self.version, i)
1843 fp.write(e)
1847 fp.write(e)
1844
1848
1845 # the temp file replace the real index when we exit the context
1849 # the temp file replace the real index when we exit the context
1846 # manager
1850 # manager
1847
1851
1848 tr.replace(self.indexfile, trindex * self._io.size)
1852 tr.replace(self.indexfile, trindex * self._io.size)
1849 self._chunkclear()
1853 self._chunkclear()
1850
1854
1851 def _nodeduplicatecallback(self, transaction, node):
1855 def _nodeduplicatecallback(self, transaction, node):
1852 """called when trying to add a node already stored.
1856 """called when trying to add a node already stored.
1853 """
1857 """
1854
1858
1855 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1859 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1856 node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
1860 node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
1857 """add a revision to the log
1861 """add a revision to the log
1858
1862
1859 text - the revision data to add
1863 text - the revision data to add
1860 transaction - the transaction object used for rollback
1864 transaction - the transaction object used for rollback
1861 link - the linkrev data to add
1865 link - the linkrev data to add
1862 p1, p2 - the parent nodeids of the revision
1866 p1, p2 - the parent nodeids of the revision
1863 cachedelta - an optional precomputed delta
1867 cachedelta - an optional precomputed delta
1864 node - nodeid of revision; typically node is not specified, and it is
1868 node - nodeid of revision; typically node is not specified, and it is
1865 computed by default as hash(text, p1, p2), however subclasses might
1869 computed by default as hash(text, p1, p2), however subclasses might
1866 use different hashing method (and override checkhash() in such case)
1870 use different hashing method (and override checkhash() in such case)
1867 flags - the known flags to set on the revision
1871 flags - the known flags to set on the revision
1868 deltacomputer - an optional deltacomputer instance shared between
1872 deltacomputer - an optional deltacomputer instance shared between
1869 multiple calls
1873 multiple calls
1870 """
1874 """
1871 if link == nullrev:
1875 if link == nullrev:
1872 raise error.RevlogError(_("attempted to add linkrev -1 to %s")
1876 raise error.RevlogError(_("attempted to add linkrev -1 to %s")
1873 % self.indexfile)
1877 % self.indexfile)
1874
1878
1875 if flags:
1879 if flags:
1876 node = node or self.hash(text, p1, p2)
1880 node = node or self.hash(text, p1, p2)
1877
1881
1878 rawtext, validatehash = self._processflags(text, flags, 'write')
1882 rawtext, validatehash = self._processflags(text, flags, 'write')
1879
1883
1880 # If the flag processor modifies the revision data, ignore any provided
1884 # If the flag processor modifies the revision data, ignore any provided
1881 # cachedelta.
1885 # cachedelta.
1882 if rawtext != text:
1886 if rawtext != text:
1883 cachedelta = None
1887 cachedelta = None
1884
1888
1885 if len(rawtext) > _maxentrysize:
1889 if len(rawtext) > _maxentrysize:
1886 raise error.RevlogError(
1890 raise error.RevlogError(
1887 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1891 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1888 % (self.indexfile, len(rawtext)))
1892 % (self.indexfile, len(rawtext)))
1889
1893
1890 node = node or self.hash(rawtext, p1, p2)
1894 node = node or self.hash(rawtext, p1, p2)
1891 if node in self.nodemap:
1895 if node in self.nodemap:
1892 return node
1896 return node
1893
1897
1894 if validatehash:
1898 if validatehash:
1895 self.checkhash(rawtext, node, p1=p1, p2=p2)
1899 self.checkhash(rawtext, node, p1=p1, p2=p2)
1896
1900
1897 return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
1901 return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
1898 flags, cachedelta=cachedelta,
1902 flags, cachedelta=cachedelta,
1899 deltacomputer=deltacomputer)
1903 deltacomputer=deltacomputer)
1900
1904
1901 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
1905 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
1902 cachedelta=None, deltacomputer=None):
1906 cachedelta=None, deltacomputer=None):
1903 """add a raw revision with known flags, node and parents
1907 """add a raw revision with known flags, node and parents
1904 useful when reusing a revision not stored in this revlog (ex: received
1908 useful when reusing a revision not stored in this revlog (ex: received
1905 over wire, or read from an external bundle).
1909 over wire, or read from an external bundle).
1906 """
1910 """
1907 dfh = None
1911 dfh = None
1908 if not self._inline:
1912 if not self._inline:
1909 dfh = self._datafp("a+")
1913 dfh = self._datafp("a+")
1910 ifh = self._indexfp("a+")
1914 ifh = self._indexfp("a+")
1911 try:
1915 try:
1912 return self._addrevision(node, rawtext, transaction, link, p1, p2,
1916 return self._addrevision(node, rawtext, transaction, link, p1, p2,
1913 flags, cachedelta, ifh, dfh,
1917 flags, cachedelta, ifh, dfh,
1914 deltacomputer=deltacomputer)
1918 deltacomputer=deltacomputer)
1915 finally:
1919 finally:
1916 if dfh:
1920 if dfh:
1917 dfh.close()
1921 dfh.close()
1918 ifh.close()
1922 ifh.close()
1919
1923
1920 def compress(self, data):
1924 def compress(self, data):
1921 """Generate a possibly-compressed representation of data."""
1925 """Generate a possibly-compressed representation of data."""
1922 if not data:
1926 if not data:
1923 return '', data
1927 return '', data
1924
1928
1925 compressed = self._compressor.compress(data)
1929 compressed = self._compressor.compress(data)
1926
1930
1927 if compressed:
1931 if compressed:
1928 # The revlog compressor added the header in the returned data.
1932 # The revlog compressor added the header in the returned data.
1929 return '', compressed
1933 return '', compressed
1930
1934
1931 if data[0:1] == '\0':
1935 if data[0:1] == '\0':
1932 return '', data
1936 return '', data
1933 return 'u', data
1937 return 'u', data
1934
1938
1935 def decompress(self, data):
1939 def decompress(self, data):
1936 """Decompress a revlog chunk.
1940 """Decompress a revlog chunk.
1937
1941
1938 The chunk is expected to begin with a header identifying the
1942 The chunk is expected to begin with a header identifying the
1939 format type so it can be routed to an appropriate decompressor.
1943 format type so it can be routed to an appropriate decompressor.
1940 """
1944 """
1941 if not data:
1945 if not data:
1942 return data
1946 return data
1943
1947
1944 # Revlogs are read much more frequently than they are written and many
1948 # Revlogs are read much more frequently than they are written and many
1945 # chunks only take microseconds to decompress, so performance is
1949 # chunks only take microseconds to decompress, so performance is
1946 # important here.
1950 # important here.
1947 #
1951 #
1948 # We can make a few assumptions about revlogs:
1952 # We can make a few assumptions about revlogs:
1949 #
1953 #
1950 # 1) the majority of chunks will be compressed (as opposed to inline
1954 # 1) the majority of chunks will be compressed (as opposed to inline
1951 # raw data).
1955 # raw data).
1952 # 2) decompressing *any* data will likely by at least 10x slower than
1956 # 2) decompressing *any* data will likely by at least 10x slower than
1953 # returning raw inline data.
1957 # returning raw inline data.
1954 # 3) we want to prioritize common and officially supported compression
1958 # 3) we want to prioritize common and officially supported compression
1955 # engines
1959 # engines
1956 #
1960 #
1957 # It follows that we want to optimize for "decompress compressed data
1961 # It follows that we want to optimize for "decompress compressed data
1958 # when encoded with common and officially supported compression engines"
1962 # when encoded with common and officially supported compression engines"
1959 # case over "raw data" and "data encoded by less common or non-official
1963 # case over "raw data" and "data encoded by less common or non-official
1960 # compression engines." That is why we have the inline lookup first
1964 # compression engines." That is why we have the inline lookup first
1961 # followed by the compengines lookup.
1965 # followed by the compengines lookup.
1962 #
1966 #
1963 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
1967 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
1964 # compressed chunks. And this matters for changelog and manifest reads.
1968 # compressed chunks. And this matters for changelog and manifest reads.
1965 t = data[0:1]
1969 t = data[0:1]
1966
1970
1967 if t == 'x':
1971 if t == 'x':
1968 try:
1972 try:
1969 return _zlibdecompress(data)
1973 return _zlibdecompress(data)
1970 except zlib.error as e:
1974 except zlib.error as e:
1971 raise error.RevlogError(_('revlog decompress error: %s') %
1975 raise error.RevlogError(_('revlog decompress error: %s') %
1972 stringutil.forcebytestr(e))
1976 stringutil.forcebytestr(e))
1973 # '\0' is more common than 'u' so it goes first.
1977 # '\0' is more common than 'u' so it goes first.
1974 elif t == '\0':
1978 elif t == '\0':
1975 return data
1979 return data
1976 elif t == 'u':
1980 elif t == 'u':
1977 return util.buffer(data, 1)
1981 return util.buffer(data, 1)
1978
1982
1979 try:
1983 try:
1980 compressor = self._decompressors[t]
1984 compressor = self._decompressors[t]
1981 except KeyError:
1985 except KeyError:
1982 try:
1986 try:
1983 engine = util.compengines.forrevlogheader(t)
1987 engine = util.compengines.forrevlogheader(t)
1984 compressor = engine.revlogcompressor()
1988 compressor = engine.revlogcompressor(self._compengineopts)
1985 self._decompressors[t] = compressor
1989 self._decompressors[t] = compressor
1986 except KeyError:
1990 except KeyError:
1987 raise error.RevlogError(_('unknown compression type %r') % t)
1991 raise error.RevlogError(_('unknown compression type %r') % t)
1988
1992
1989 return compressor.decompress(data)
1993 return compressor.decompress(data)
1990
1994
1991 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
1995 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
1992 cachedelta, ifh, dfh, alwayscache=False,
1996 cachedelta, ifh, dfh, alwayscache=False,
1993 deltacomputer=None):
1997 deltacomputer=None):
1994 """internal function to add revisions to the log
1998 """internal function to add revisions to the log
1995
1999
1996 see addrevision for argument descriptions.
2000 see addrevision for argument descriptions.
1997
2001
1998 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2002 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
1999
2003
2000 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2004 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2001 be used.
2005 be used.
2002
2006
2003 invariants:
2007 invariants:
2004 - rawtext is optional (can be None); if not set, cachedelta must be set.
2008 - rawtext is optional (can be None); if not set, cachedelta must be set.
2005 if both are set, they must correspond to each other.
2009 if both are set, they must correspond to each other.
2006 """
2010 """
2007 if node == nullid:
2011 if node == nullid:
2008 raise error.RevlogError(_("%s: attempt to add null revision") %
2012 raise error.RevlogError(_("%s: attempt to add null revision") %
2009 self.indexfile)
2013 self.indexfile)
2010 if node == wdirid or node in wdirfilenodeids:
2014 if node == wdirid or node in wdirfilenodeids:
2011 raise error.RevlogError(_("%s: attempt to add wdir revision") %
2015 raise error.RevlogError(_("%s: attempt to add wdir revision") %
2012 self.indexfile)
2016 self.indexfile)
2013
2017
2014 if self._inline:
2018 if self._inline:
2015 fh = ifh
2019 fh = ifh
2016 else:
2020 else:
2017 fh = dfh
2021 fh = dfh
2018
2022
2019 btext = [rawtext]
2023 btext = [rawtext]
2020
2024
2021 curr = len(self)
2025 curr = len(self)
2022 prev = curr - 1
2026 prev = curr - 1
2023 offset = self.end(prev)
2027 offset = self.end(prev)
2024 p1r, p2r = self.rev(p1), self.rev(p2)
2028 p1r, p2r = self.rev(p1), self.rev(p2)
2025
2029
2026 # full versions are inserted when the needed deltas
2030 # full versions are inserted when the needed deltas
2027 # become comparable to the uncompressed text
2031 # become comparable to the uncompressed text
2028 if rawtext is None:
2032 if rawtext is None:
2029 # need rawtext size, before changed by flag processors, which is
2033 # need rawtext size, before changed by flag processors, which is
2030 # the non-raw size. use revlog explicitly to avoid filelog's extra
2034 # the non-raw size. use revlog explicitly to avoid filelog's extra
2031 # logic that might remove metadata size.
2035 # logic that might remove metadata size.
2032 textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
2036 textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
2033 cachedelta[1])
2037 cachedelta[1])
2034 else:
2038 else:
2035 textlen = len(rawtext)
2039 textlen = len(rawtext)
2036
2040
2037 if deltacomputer is None:
2041 if deltacomputer is None:
2038 deltacomputer = deltautil.deltacomputer(self)
2042 deltacomputer = deltautil.deltacomputer(self)
2039
2043
2040 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2044 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2041
2045
2042 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2046 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2043
2047
2044 e = (offset_type(offset, flags), deltainfo.deltalen, textlen,
2048 e = (offset_type(offset, flags), deltainfo.deltalen, textlen,
2045 deltainfo.base, link, p1r, p2r, node)
2049 deltainfo.base, link, p1r, p2r, node)
2046 self.index.append(e)
2050 self.index.append(e)
2047 self.nodemap[node] = curr
2051 self.nodemap[node] = curr
2048
2052
2049 # Reset the pure node cache start lookup offset to account for new
2053 # Reset the pure node cache start lookup offset to account for new
2050 # revision.
2054 # revision.
2051 if self._nodepos is not None:
2055 if self._nodepos is not None:
2052 self._nodepos = curr
2056 self._nodepos = curr
2053
2057
2054 entry = self._io.packentry(e, self.node, self.version, curr)
2058 entry = self._io.packentry(e, self.node, self.version, curr)
2055 self._writeentry(transaction, ifh, dfh, entry, deltainfo.data,
2059 self._writeentry(transaction, ifh, dfh, entry, deltainfo.data,
2056 link, offset)
2060 link, offset)
2057
2061
2058 rawtext = btext[0]
2062 rawtext = btext[0]
2059
2063
2060 if alwayscache and rawtext is None:
2064 if alwayscache and rawtext is None:
2061 rawtext = deltacomputer.buildtext(revinfo, fh)
2065 rawtext = deltacomputer.buildtext(revinfo, fh)
2062
2066
2063 if type(rawtext) == bytes: # only accept immutable objects
2067 if type(rawtext) == bytes: # only accept immutable objects
2064 self._revisioncache = (node, curr, rawtext)
2068 self._revisioncache = (node, curr, rawtext)
2065 self._chainbasecache[curr] = deltainfo.chainbase
2069 self._chainbasecache[curr] = deltainfo.chainbase
2066 return node
2070 return node
2067
2071
2068 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2072 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2069 # Files opened in a+ mode have inconsistent behavior on various
2073 # Files opened in a+ mode have inconsistent behavior on various
2070 # platforms. Windows requires that a file positioning call be made
2074 # platforms. Windows requires that a file positioning call be made
2071 # when the file handle transitions between reads and writes. See
2075 # when the file handle transitions between reads and writes. See
2072 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2076 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2073 # platforms, Python or the platform itself can be buggy. Some versions
2077 # platforms, Python or the platform itself can be buggy. Some versions
2074 # of Solaris have been observed to not append at the end of the file
2078 # of Solaris have been observed to not append at the end of the file
2075 # if the file was seeked to before the end. See issue4943 for more.
2079 # if the file was seeked to before the end. See issue4943 for more.
2076 #
2080 #
2077 # We work around this issue by inserting a seek() before writing.
2081 # We work around this issue by inserting a seek() before writing.
2078 # Note: This is likely not necessary on Python 3. However, because
2082 # Note: This is likely not necessary on Python 3. However, because
2079 # the file handle is reused for reads and may be seeked there, we need
2083 # the file handle is reused for reads and may be seeked there, we need
2080 # to be careful before changing this.
2084 # to be careful before changing this.
2081 ifh.seek(0, os.SEEK_END)
2085 ifh.seek(0, os.SEEK_END)
2082 if dfh:
2086 if dfh:
2083 dfh.seek(0, os.SEEK_END)
2087 dfh.seek(0, os.SEEK_END)
2084
2088
2085 curr = len(self) - 1
2089 curr = len(self) - 1
2086 if not self._inline:
2090 if not self._inline:
2087 transaction.add(self.datafile, offset)
2091 transaction.add(self.datafile, offset)
2088 transaction.add(self.indexfile, curr * len(entry))
2092 transaction.add(self.indexfile, curr * len(entry))
2089 if data[0]:
2093 if data[0]:
2090 dfh.write(data[0])
2094 dfh.write(data[0])
2091 dfh.write(data[1])
2095 dfh.write(data[1])
2092 ifh.write(entry)
2096 ifh.write(entry)
2093 else:
2097 else:
2094 offset += curr * self._io.size
2098 offset += curr * self._io.size
2095 transaction.add(self.indexfile, offset, curr)
2099 transaction.add(self.indexfile, offset, curr)
2096 ifh.write(entry)
2100 ifh.write(entry)
2097 ifh.write(data[0])
2101 ifh.write(data[0])
2098 ifh.write(data[1])
2102 ifh.write(data[1])
2099 self._enforceinlinesize(transaction, ifh)
2103 self._enforceinlinesize(transaction, ifh)
2100
2104
2101 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
2105 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
2102 """
2106 """
2103 add a delta group
2107 add a delta group
2104
2108
2105 given a set of deltas, add them to the revision log. the
2109 given a set of deltas, add them to the revision log. the
2106 first delta is against its parent, which should be in our
2110 first delta is against its parent, which should be in our
2107 log, the rest are against the previous delta.
2111 log, the rest are against the previous delta.
2108
2112
2109 If ``addrevisioncb`` is defined, it will be called with arguments of
2113 If ``addrevisioncb`` is defined, it will be called with arguments of
2110 this revlog and the node that was added.
2114 this revlog and the node that was added.
2111 """
2115 """
2112
2116
2113 if self._writinghandles:
2117 if self._writinghandles:
2114 raise error.ProgrammingError('cannot nest addgroup() calls')
2118 raise error.ProgrammingError('cannot nest addgroup() calls')
2115
2119
2116 nodes = []
2120 nodes = []
2117
2121
2118 r = len(self)
2122 r = len(self)
2119 end = 0
2123 end = 0
2120 if r:
2124 if r:
2121 end = self.end(r - 1)
2125 end = self.end(r - 1)
2122 ifh = self._indexfp("a+")
2126 ifh = self._indexfp("a+")
2123 isize = r * self._io.size
2127 isize = r * self._io.size
2124 if self._inline:
2128 if self._inline:
2125 transaction.add(self.indexfile, end + isize, r)
2129 transaction.add(self.indexfile, end + isize, r)
2126 dfh = None
2130 dfh = None
2127 else:
2131 else:
2128 transaction.add(self.indexfile, isize, r)
2132 transaction.add(self.indexfile, isize, r)
2129 transaction.add(self.datafile, end)
2133 transaction.add(self.datafile, end)
2130 dfh = self._datafp("a+")
2134 dfh = self._datafp("a+")
2131 def flush():
2135 def flush():
2132 if dfh:
2136 if dfh:
2133 dfh.flush()
2137 dfh.flush()
2134 ifh.flush()
2138 ifh.flush()
2135
2139
2136 self._writinghandles = (ifh, dfh)
2140 self._writinghandles = (ifh, dfh)
2137
2141
2138 try:
2142 try:
2139 deltacomputer = deltautil.deltacomputer(self)
2143 deltacomputer = deltautil.deltacomputer(self)
2140 # loop through our set of deltas
2144 # loop through our set of deltas
2141 for data in deltas:
2145 for data in deltas:
2142 node, p1, p2, linknode, deltabase, delta, flags = data
2146 node, p1, p2, linknode, deltabase, delta, flags = data
2143 link = linkmapper(linknode)
2147 link = linkmapper(linknode)
2144 flags = flags or REVIDX_DEFAULT_FLAGS
2148 flags = flags or REVIDX_DEFAULT_FLAGS
2145
2149
2146 nodes.append(node)
2150 nodes.append(node)
2147
2151
2148 if node in self.nodemap:
2152 if node in self.nodemap:
2149 self._nodeduplicatecallback(transaction, node)
2153 self._nodeduplicatecallback(transaction, node)
2150 # this can happen if two branches make the same change
2154 # this can happen if two branches make the same change
2151 continue
2155 continue
2152
2156
2153 for p in (p1, p2):
2157 for p in (p1, p2):
2154 if p not in self.nodemap:
2158 if p not in self.nodemap:
2155 raise error.LookupError(p, self.indexfile,
2159 raise error.LookupError(p, self.indexfile,
2156 _('unknown parent'))
2160 _('unknown parent'))
2157
2161
2158 if deltabase not in self.nodemap:
2162 if deltabase not in self.nodemap:
2159 raise error.LookupError(deltabase, self.indexfile,
2163 raise error.LookupError(deltabase, self.indexfile,
2160 _('unknown delta base'))
2164 _('unknown delta base'))
2161
2165
2162 baserev = self.rev(deltabase)
2166 baserev = self.rev(deltabase)
2163
2167
2164 if baserev != nullrev and self.iscensored(baserev):
2168 if baserev != nullrev and self.iscensored(baserev):
2165 # if base is censored, delta must be full replacement in a
2169 # if base is censored, delta must be full replacement in a
2166 # single patch operation
2170 # single patch operation
2167 hlen = struct.calcsize(">lll")
2171 hlen = struct.calcsize(">lll")
2168 oldlen = self.rawsize(baserev)
2172 oldlen = self.rawsize(baserev)
2169 newlen = len(delta) - hlen
2173 newlen = len(delta) - hlen
2170 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2174 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2171 raise error.CensoredBaseError(self.indexfile,
2175 raise error.CensoredBaseError(self.indexfile,
2172 self.node(baserev))
2176 self.node(baserev))
2173
2177
2174 if not flags and self._peek_iscensored(baserev, delta, flush):
2178 if not flags and self._peek_iscensored(baserev, delta, flush):
2175 flags |= REVIDX_ISCENSORED
2179 flags |= REVIDX_ISCENSORED
2176
2180
2177 # We assume consumers of addrevisioncb will want to retrieve
2181 # We assume consumers of addrevisioncb will want to retrieve
2178 # the added revision, which will require a call to
2182 # the added revision, which will require a call to
2179 # revision(). revision() will fast path if there is a cache
2183 # revision(). revision() will fast path if there is a cache
2180 # hit. So, we tell _addrevision() to always cache in this case.
2184 # hit. So, we tell _addrevision() to always cache in this case.
2181 # We're only using addgroup() in the context of changegroup
2185 # We're only using addgroup() in the context of changegroup
2182 # generation so the revision data can always be handled as raw
2186 # generation so the revision data can always be handled as raw
2183 # by the flagprocessor.
2187 # by the flagprocessor.
2184 self._addrevision(node, None, transaction, link,
2188 self._addrevision(node, None, transaction, link,
2185 p1, p2, flags, (baserev, delta),
2189 p1, p2, flags, (baserev, delta),
2186 ifh, dfh,
2190 ifh, dfh,
2187 alwayscache=bool(addrevisioncb),
2191 alwayscache=bool(addrevisioncb),
2188 deltacomputer=deltacomputer)
2192 deltacomputer=deltacomputer)
2189
2193
2190 if addrevisioncb:
2194 if addrevisioncb:
2191 addrevisioncb(self, node)
2195 addrevisioncb(self, node)
2192
2196
2193 if not dfh and not self._inline:
2197 if not dfh and not self._inline:
2194 # addrevision switched from inline to conventional
2198 # addrevision switched from inline to conventional
2195 # reopen the index
2199 # reopen the index
2196 ifh.close()
2200 ifh.close()
2197 dfh = self._datafp("a+")
2201 dfh = self._datafp("a+")
2198 ifh = self._indexfp("a+")
2202 ifh = self._indexfp("a+")
2199 self._writinghandles = (ifh, dfh)
2203 self._writinghandles = (ifh, dfh)
2200 finally:
2204 finally:
2201 self._writinghandles = None
2205 self._writinghandles = None
2202
2206
2203 if dfh:
2207 if dfh:
2204 dfh.close()
2208 dfh.close()
2205 ifh.close()
2209 ifh.close()
2206
2210
2207 return nodes
2211 return nodes
2208
2212
2209 def iscensored(self, rev):
2213 def iscensored(self, rev):
2210 """Check if a file revision is censored."""
2214 """Check if a file revision is censored."""
2211 if not self._censorable:
2215 if not self._censorable:
2212 return False
2216 return False
2213
2217
2214 return self.flags(rev) & REVIDX_ISCENSORED
2218 return self.flags(rev) & REVIDX_ISCENSORED
2215
2219
2216 def _peek_iscensored(self, baserev, delta, flush):
2220 def _peek_iscensored(self, baserev, delta, flush):
2217 """Quickly check if a delta produces a censored revision."""
2221 """Quickly check if a delta produces a censored revision."""
2218 if not self._censorable:
2222 if not self._censorable:
2219 return False
2223 return False
2220
2224
2221 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2225 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2222
2226
2223 def getstrippoint(self, minlink):
2227 def getstrippoint(self, minlink):
2224 """find the minimum rev that must be stripped to strip the linkrev
2228 """find the minimum rev that must be stripped to strip the linkrev
2225
2229
2226 Returns a tuple containing the minimum rev and a set of all revs that
2230 Returns a tuple containing the minimum rev and a set of all revs that
2227 have linkrevs that will be broken by this strip.
2231 have linkrevs that will be broken by this strip.
2228 """
2232 """
2229 return storageutil.resolvestripinfo(minlink, len(self) - 1,
2233 return storageutil.resolvestripinfo(minlink, len(self) - 1,
2230 self.headrevs(),
2234 self.headrevs(),
2231 self.linkrev, self.parentrevs)
2235 self.linkrev, self.parentrevs)
2232
2236
2233 def strip(self, minlink, transaction):
2237 def strip(self, minlink, transaction):
2234 """truncate the revlog on the first revision with a linkrev >= minlink
2238 """truncate the revlog on the first revision with a linkrev >= minlink
2235
2239
2236 This function is called when we're stripping revision minlink and
2240 This function is called when we're stripping revision minlink and
2237 its descendants from the repository.
2241 its descendants from the repository.
2238
2242
2239 We have to remove all revisions with linkrev >= minlink, because
2243 We have to remove all revisions with linkrev >= minlink, because
2240 the equivalent changelog revisions will be renumbered after the
2244 the equivalent changelog revisions will be renumbered after the
2241 strip.
2245 strip.
2242
2246
2243 So we truncate the revlog on the first of these revisions, and
2247 So we truncate the revlog on the first of these revisions, and
2244 trust that the caller has saved the revisions that shouldn't be
2248 trust that the caller has saved the revisions that shouldn't be
2245 removed and that it'll re-add them after this truncation.
2249 removed and that it'll re-add them after this truncation.
2246 """
2250 """
2247 if len(self) == 0:
2251 if len(self) == 0:
2248 return
2252 return
2249
2253
2250 rev, _ = self.getstrippoint(minlink)
2254 rev, _ = self.getstrippoint(minlink)
2251 if rev == len(self):
2255 if rev == len(self):
2252 return
2256 return
2253
2257
2254 # first truncate the files on disk
2258 # first truncate the files on disk
2255 end = self.start(rev)
2259 end = self.start(rev)
2256 if not self._inline:
2260 if not self._inline:
2257 transaction.add(self.datafile, end)
2261 transaction.add(self.datafile, end)
2258 end = rev * self._io.size
2262 end = rev * self._io.size
2259 else:
2263 else:
2260 end += rev * self._io.size
2264 end += rev * self._io.size
2261
2265
2262 transaction.add(self.indexfile, end)
2266 transaction.add(self.indexfile, end)
2263
2267
2264 # then reset internal state in memory to forget those revisions
2268 # then reset internal state in memory to forget those revisions
2265 self._revisioncache = None
2269 self._revisioncache = None
2266 self._chaininfocache = {}
2270 self._chaininfocache = {}
2267 self._chunkclear()
2271 self._chunkclear()
2268 for x in pycompat.xrange(rev, len(self)):
2272 for x in pycompat.xrange(rev, len(self)):
2269 del self.nodemap[self.node(x)]
2273 del self.nodemap[self.node(x)]
2270
2274
2271 del self.index[rev:-1]
2275 del self.index[rev:-1]
2272 self._nodepos = None
2276 self._nodepos = None
2273
2277
2274 def checksize(self):
2278 def checksize(self):
2275 """Check size of index and data files
2279 """Check size of index and data files
2276
2280
2277 return a (dd, di) tuple.
2281 return a (dd, di) tuple.
2278 - dd: extra bytes for the "data" file
2282 - dd: extra bytes for the "data" file
2279 - di: extra bytes for the "index" file
2283 - di: extra bytes for the "index" file
2280
2284
2281 A healthy revlog will return (0, 0).
2285 A healthy revlog will return (0, 0).
2282 """
2286 """
2283 expected = 0
2287 expected = 0
2284 if len(self):
2288 if len(self):
2285 expected = max(0, self.end(len(self) - 1))
2289 expected = max(0, self.end(len(self) - 1))
2286
2290
2287 try:
2291 try:
2288 with self._datafp() as f:
2292 with self._datafp() as f:
2289 f.seek(0, 2)
2293 f.seek(0, 2)
2290 actual = f.tell()
2294 actual = f.tell()
2291 dd = actual - expected
2295 dd = actual - expected
2292 except IOError as inst:
2296 except IOError as inst:
2293 if inst.errno != errno.ENOENT:
2297 if inst.errno != errno.ENOENT:
2294 raise
2298 raise
2295 dd = 0
2299 dd = 0
2296
2300
2297 try:
2301 try:
2298 f = self.opener(self.indexfile)
2302 f = self.opener(self.indexfile)
2299 f.seek(0, 2)
2303 f.seek(0, 2)
2300 actual = f.tell()
2304 actual = f.tell()
2301 f.close()
2305 f.close()
2302 s = self._io.size
2306 s = self._io.size
2303 i = max(0, actual // s)
2307 i = max(0, actual // s)
2304 di = actual - (i * s)
2308 di = actual - (i * s)
2305 if self._inline:
2309 if self._inline:
2306 databytes = 0
2310 databytes = 0
2307 for r in self:
2311 for r in self:
2308 databytes += max(0, self.length(r))
2312 databytes += max(0, self.length(r))
2309 dd = 0
2313 dd = 0
2310 di = actual - len(self) * s - databytes
2314 di = actual - len(self) * s - databytes
2311 except IOError as inst:
2315 except IOError as inst:
2312 if inst.errno != errno.ENOENT:
2316 if inst.errno != errno.ENOENT:
2313 raise
2317 raise
2314 di = 0
2318 di = 0
2315
2319
2316 return (dd, di)
2320 return (dd, di)
2317
2321
2318 def files(self):
2322 def files(self):
2319 res = [self.indexfile]
2323 res = [self.indexfile]
2320 if not self._inline:
2324 if not self._inline:
2321 res.append(self.datafile)
2325 res.append(self.datafile)
2322 return res
2326 return res
2323
2327
2324 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
2328 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
2325 assumehaveparentrevisions=False,
2329 assumehaveparentrevisions=False,
2326 deltamode=repository.CG_DELTAMODE_STD):
2330 deltamode=repository.CG_DELTAMODE_STD):
2327 if nodesorder not in ('nodes', 'storage', 'linear', None):
2331 if nodesorder not in ('nodes', 'storage', 'linear', None):
2328 raise error.ProgrammingError('unhandled value for nodesorder: %s' %
2332 raise error.ProgrammingError('unhandled value for nodesorder: %s' %
2329 nodesorder)
2333 nodesorder)
2330
2334
2331 if nodesorder is None and not self._generaldelta:
2335 if nodesorder is None and not self._generaldelta:
2332 nodesorder = 'storage'
2336 nodesorder = 'storage'
2333
2337
2334 if (not self._storedeltachains and
2338 if (not self._storedeltachains and
2335 deltamode != repository.CG_DELTAMODE_PREV):
2339 deltamode != repository.CG_DELTAMODE_PREV):
2336 deltamode = repository.CG_DELTAMODE_FULL
2340 deltamode = repository.CG_DELTAMODE_FULL
2337
2341
2338 return storageutil.emitrevisions(
2342 return storageutil.emitrevisions(
2339 self, nodes, nodesorder, revlogrevisiondelta,
2343 self, nodes, nodesorder, revlogrevisiondelta,
2340 deltaparentfn=self.deltaparent,
2344 deltaparentfn=self.deltaparent,
2341 candeltafn=self.candelta,
2345 candeltafn=self.candelta,
2342 rawsizefn=self.rawsize,
2346 rawsizefn=self.rawsize,
2343 revdifffn=self.revdiff,
2347 revdifffn=self.revdiff,
2344 flagsfn=self.flags,
2348 flagsfn=self.flags,
2345 deltamode=deltamode,
2349 deltamode=deltamode,
2346 revisiondata=revisiondata,
2350 revisiondata=revisiondata,
2347 assumehaveparentrevisions=assumehaveparentrevisions)
2351 assumehaveparentrevisions=assumehaveparentrevisions)
2348
2352
2349 DELTAREUSEALWAYS = 'always'
2353 DELTAREUSEALWAYS = 'always'
2350 DELTAREUSESAMEREVS = 'samerevs'
2354 DELTAREUSESAMEREVS = 'samerevs'
2351 DELTAREUSENEVER = 'never'
2355 DELTAREUSENEVER = 'never'
2352
2356
2353 DELTAREUSEFULLADD = 'fulladd'
2357 DELTAREUSEFULLADD = 'fulladd'
2354
2358
2355 DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}
2359 DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}
2356
2360
2357 def clone(self, tr, destrevlog, addrevisioncb=None,
2361 def clone(self, tr, destrevlog, addrevisioncb=None,
2358 deltareuse=DELTAREUSESAMEREVS, forcedeltabothparents=None):
2362 deltareuse=DELTAREUSESAMEREVS, forcedeltabothparents=None):
2359 """Copy this revlog to another, possibly with format changes.
2363 """Copy this revlog to another, possibly with format changes.
2360
2364
2361 The destination revlog will contain the same revisions and nodes.
2365 The destination revlog will contain the same revisions and nodes.
2362 However, it may not be bit-for-bit identical due to e.g. delta encoding
2366 However, it may not be bit-for-bit identical due to e.g. delta encoding
2363 differences.
2367 differences.
2364
2368
2365 The ``deltareuse`` argument control how deltas from the existing revlog
2369 The ``deltareuse`` argument control how deltas from the existing revlog
2366 are preserved in the destination revlog. The argument can have the
2370 are preserved in the destination revlog. The argument can have the
2367 following values:
2371 following values:
2368
2372
2369 DELTAREUSEALWAYS
2373 DELTAREUSEALWAYS
2370 Deltas will always be reused (if possible), even if the destination
2374 Deltas will always be reused (if possible), even if the destination
2371 revlog would not select the same revisions for the delta. This is the
2375 revlog would not select the same revisions for the delta. This is the
2372 fastest mode of operation.
2376 fastest mode of operation.
2373 DELTAREUSESAMEREVS
2377 DELTAREUSESAMEREVS
2374 Deltas will be reused if the destination revlog would pick the same
2378 Deltas will be reused if the destination revlog would pick the same
2375 revisions for the delta. This mode strikes a balance between speed
2379 revisions for the delta. This mode strikes a balance between speed
2376 and optimization.
2380 and optimization.
2377 DELTAREUSENEVER
2381 DELTAREUSENEVER
2378 Deltas will never be reused. This is the slowest mode of execution.
2382 Deltas will never be reused. This is the slowest mode of execution.
2379 This mode can be used to recompute deltas (e.g. if the diff/delta
2383 This mode can be used to recompute deltas (e.g. if the diff/delta
2380 algorithm changes).
2384 algorithm changes).
2381
2385
2382 Delta computation can be slow, so the choice of delta reuse policy can
2386 Delta computation can be slow, so the choice of delta reuse policy can
2383 significantly affect run time.
2387 significantly affect run time.
2384
2388
2385 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2389 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2386 two extremes. Deltas will be reused if they are appropriate. But if the
2390 two extremes. Deltas will be reused if they are appropriate. But if the
2387 delta could choose a better revision, it will do so. This means if you
2391 delta could choose a better revision, it will do so. This means if you
2388 are converting a non-generaldelta revlog to a generaldelta revlog,
2392 are converting a non-generaldelta revlog to a generaldelta revlog,
2389 deltas will be recomputed if the delta's parent isn't a parent of the
2393 deltas will be recomputed if the delta's parent isn't a parent of the
2390 revision.
2394 revision.
2391
2395
2392 In addition to the delta policy, the ``forcedeltabothparents``
2396 In addition to the delta policy, the ``forcedeltabothparents``
2393 argument controls whether to force compute deltas against both parents
2397 argument controls whether to force compute deltas against both parents
2394 for merges. By default, the current default is used.
2398 for merges. By default, the current default is used.
2395 """
2399 """
2396 if deltareuse not in self.DELTAREUSEALL:
2400 if deltareuse not in self.DELTAREUSEALL:
2397 raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)
2401 raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)
2398
2402
2399 if len(destrevlog):
2403 if len(destrevlog):
2400 raise ValueError(_('destination revlog is not empty'))
2404 raise ValueError(_('destination revlog is not empty'))
2401
2405
2402 if getattr(self, 'filteredrevs', None):
2406 if getattr(self, 'filteredrevs', None):
2403 raise ValueError(_('source revlog has filtered revisions'))
2407 raise ValueError(_('source revlog has filtered revisions'))
2404 if getattr(destrevlog, 'filteredrevs', None):
2408 if getattr(destrevlog, 'filteredrevs', None):
2405 raise ValueError(_('destination revlog has filtered revisions'))
2409 raise ValueError(_('destination revlog has filtered revisions'))
2406
2410
2407 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
2411 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
2408 # if possible.
2412 # if possible.
2409 oldlazydelta = destrevlog._lazydelta
2413 oldlazydelta = destrevlog._lazydelta
2410 oldlazydeltabase = destrevlog._lazydeltabase
2414 oldlazydeltabase = destrevlog._lazydeltabase
2411 oldamd = destrevlog._deltabothparents
2415 oldamd = destrevlog._deltabothparents
2412
2416
2413 try:
2417 try:
2414 if deltareuse == self.DELTAREUSEALWAYS:
2418 if deltareuse == self.DELTAREUSEALWAYS:
2415 destrevlog._lazydeltabase = True
2419 destrevlog._lazydeltabase = True
2416 destrevlog._lazydelta = True
2420 destrevlog._lazydelta = True
2417 elif deltareuse == self.DELTAREUSESAMEREVS:
2421 elif deltareuse == self.DELTAREUSESAMEREVS:
2418 destrevlog._lazydeltabase = False
2422 destrevlog._lazydeltabase = False
2419 destrevlog._lazydelta = True
2423 destrevlog._lazydelta = True
2420 elif deltareuse == self.DELTAREUSENEVER:
2424 elif deltareuse == self.DELTAREUSENEVER:
2421 destrevlog._lazydeltabase = False
2425 destrevlog._lazydeltabase = False
2422 destrevlog._lazydelta = False
2426 destrevlog._lazydelta = False
2423
2427
2424 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2428 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2425
2429
2426 deltacomputer = deltautil.deltacomputer(destrevlog)
2430 deltacomputer = deltautil.deltacomputer(destrevlog)
2427 index = self.index
2431 index = self.index
2428 for rev in self:
2432 for rev in self:
2429 entry = index[rev]
2433 entry = index[rev]
2430
2434
2431 # Some classes override linkrev to take filtered revs into
2435 # Some classes override linkrev to take filtered revs into
2432 # account. Use raw entry from index.
2436 # account. Use raw entry from index.
2433 flags = entry[0] & 0xffff
2437 flags = entry[0] & 0xffff
2434 linkrev = entry[4]
2438 linkrev = entry[4]
2435 p1 = index[entry[5]][7]
2439 p1 = index[entry[5]][7]
2436 p2 = index[entry[6]][7]
2440 p2 = index[entry[6]][7]
2437 node = entry[7]
2441 node = entry[7]
2438
2442
2439 # (Possibly) reuse the delta from the revlog if allowed and
2443 # (Possibly) reuse the delta from the revlog if allowed and
2440 # the revlog chunk is a delta.
2444 # the revlog chunk is a delta.
2441 cachedelta = None
2445 cachedelta = None
2442 rawtext = None
2446 rawtext = None
2443 if destrevlog._lazydelta:
2447 if destrevlog._lazydelta:
2444 dp = self.deltaparent(rev)
2448 dp = self.deltaparent(rev)
2445 if dp != nullrev:
2449 if dp != nullrev:
2446 cachedelta = (dp, bytes(self._chunk(rev)))
2450 cachedelta = (dp, bytes(self._chunk(rev)))
2447
2451
2448 if not cachedelta:
2452 if not cachedelta:
2449 rawtext = self.revision(rev, raw=True)
2453 rawtext = self.revision(rev, raw=True)
2450
2454
2451
2455
2452 if deltareuse == self.DELTAREUSEFULLADD:
2456 if deltareuse == self.DELTAREUSEFULLADD:
2453 destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
2457 destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
2454 cachedelta=cachedelta,
2458 cachedelta=cachedelta,
2455 node=node, flags=flags,
2459 node=node, flags=flags,
2456 deltacomputer=deltacomputer)
2460 deltacomputer=deltacomputer)
2457 else:
2461 else:
2458 ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
2462 ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
2459 checkambig=False)
2463 checkambig=False)
2460 dfh = None
2464 dfh = None
2461 if not destrevlog._inline:
2465 if not destrevlog._inline:
2462 dfh = destrevlog.opener(destrevlog.datafile, 'a+')
2466 dfh = destrevlog.opener(destrevlog.datafile, 'a+')
2463 try:
2467 try:
2464 destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
2468 destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
2465 p2, flags, cachedelta, ifh, dfh,
2469 p2, flags, cachedelta, ifh, dfh,
2466 deltacomputer=deltacomputer)
2470 deltacomputer=deltacomputer)
2467 finally:
2471 finally:
2468 if dfh:
2472 if dfh:
2469 dfh.close()
2473 dfh.close()
2470 ifh.close()
2474 ifh.close()
2471
2475
2472 if addrevisioncb:
2476 if addrevisioncb:
2473 addrevisioncb(self, rev, node)
2477 addrevisioncb(self, rev, node)
2474 finally:
2478 finally:
2475 destrevlog._lazydelta = oldlazydelta
2479 destrevlog._lazydelta = oldlazydelta
2476 destrevlog._lazydeltabase = oldlazydeltabase
2480 destrevlog._lazydeltabase = oldlazydeltabase
2477 destrevlog._deltabothparents = oldamd
2481 destrevlog._deltabothparents = oldamd
2478
2482
2479 def censorrevision(self, tr, censornode, tombstone=b''):
2483 def censorrevision(self, tr, censornode, tombstone=b''):
2480 if (self.version & 0xFFFF) == REVLOGV0:
2484 if (self.version & 0xFFFF) == REVLOGV0:
2481 raise error.RevlogError(_('cannot censor with version %d revlogs') %
2485 raise error.RevlogError(_('cannot censor with version %d revlogs') %
2482 self.version)
2486 self.version)
2483
2487
2484 censorrev = self.rev(censornode)
2488 censorrev = self.rev(censornode)
2485 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2489 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2486
2490
2487 if len(tombstone) > self.rawsize(censorrev):
2491 if len(tombstone) > self.rawsize(censorrev):
2488 raise error.Abort(_('censor tombstone must be no longer than '
2492 raise error.Abort(_('censor tombstone must be no longer than '
2489 'censored data'))
2493 'censored data'))
2490
2494
2491 # Rewriting the revlog in place is hard. Our strategy for censoring is
2495 # Rewriting the revlog in place is hard. Our strategy for censoring is
2492 # to create a new revlog, copy all revisions to it, then replace the
2496 # to create a new revlog, copy all revisions to it, then replace the
2493 # revlogs on transaction close.
2497 # revlogs on transaction close.
2494
2498
2495 newindexfile = self.indexfile + b'.tmpcensored'
2499 newindexfile = self.indexfile + b'.tmpcensored'
2496 newdatafile = self.datafile + b'.tmpcensored'
2500 newdatafile = self.datafile + b'.tmpcensored'
2497
2501
2498 # This is a bit dangerous. We could easily have a mismatch of state.
2502 # This is a bit dangerous. We could easily have a mismatch of state.
2499 newrl = revlog(self.opener, newindexfile, newdatafile,
2503 newrl = revlog(self.opener, newindexfile, newdatafile,
2500 censorable=True)
2504 censorable=True)
2501 newrl.version = self.version
2505 newrl.version = self.version
2502 newrl._generaldelta = self._generaldelta
2506 newrl._generaldelta = self._generaldelta
2503 newrl._io = self._io
2507 newrl._io = self._io
2504
2508
2505 for rev in self.revs():
2509 for rev in self.revs():
2506 node = self.node(rev)
2510 node = self.node(rev)
2507 p1, p2 = self.parents(node)
2511 p1, p2 = self.parents(node)
2508
2512
2509 if rev == censorrev:
2513 if rev == censorrev:
2510 newrl.addrawrevision(tombstone, tr, self.linkrev(censorrev),
2514 newrl.addrawrevision(tombstone, tr, self.linkrev(censorrev),
2511 p1, p2, censornode, REVIDX_ISCENSORED)
2515 p1, p2, censornode, REVIDX_ISCENSORED)
2512
2516
2513 if newrl.deltaparent(rev) != nullrev:
2517 if newrl.deltaparent(rev) != nullrev:
2514 raise error.Abort(_('censored revision stored as delta; '
2518 raise error.Abort(_('censored revision stored as delta; '
2515 'cannot censor'),
2519 'cannot censor'),
2516 hint=_('censoring of revlogs is not '
2520 hint=_('censoring of revlogs is not '
2517 'fully implemented; please report '
2521 'fully implemented; please report '
2518 'this bug'))
2522 'this bug'))
2519 continue
2523 continue
2520
2524
2521 if self.iscensored(rev):
2525 if self.iscensored(rev):
2522 if self.deltaparent(rev) != nullrev:
2526 if self.deltaparent(rev) != nullrev:
2523 raise error.Abort(_('cannot censor due to censored '
2527 raise error.Abort(_('cannot censor due to censored '
2524 'revision having delta stored'))
2528 'revision having delta stored'))
2525 rawtext = self._chunk(rev)
2529 rawtext = self._chunk(rev)
2526 else:
2530 else:
2527 rawtext = self.revision(rev, raw=True)
2531 rawtext = self.revision(rev, raw=True)
2528
2532
2529 newrl.addrawrevision(rawtext, tr, self.linkrev(rev), p1, p2, node,
2533 newrl.addrawrevision(rawtext, tr, self.linkrev(rev), p1, p2, node,
2530 self.flags(rev))
2534 self.flags(rev))
2531
2535
2532 tr.addbackup(self.indexfile, location='store')
2536 tr.addbackup(self.indexfile, location='store')
2533 if not self._inline:
2537 if not self._inline:
2534 tr.addbackup(self.datafile, location='store')
2538 tr.addbackup(self.datafile, location='store')
2535
2539
2536 self.opener.rename(newrl.indexfile, self.indexfile)
2540 self.opener.rename(newrl.indexfile, self.indexfile)
2537 if not self._inline:
2541 if not self._inline:
2538 self.opener.rename(newrl.datafile, self.datafile)
2542 self.opener.rename(newrl.datafile, self.datafile)
2539
2543
2540 self.clearcaches()
2544 self.clearcaches()
2541 self._loadindex()
2545 self._loadindex()
2542
2546
2543 def verifyintegrity(self, state):
2547 def verifyintegrity(self, state):
2544 """Verifies the integrity of the revlog.
2548 """Verifies the integrity of the revlog.
2545
2549
2546 Yields ``revlogproblem`` instances describing problems that are
2550 Yields ``revlogproblem`` instances describing problems that are
2547 found.
2551 found.
2548 """
2552 """
2549 dd, di = self.checksize()
2553 dd, di = self.checksize()
2550 if dd:
2554 if dd:
2551 yield revlogproblem(error=_('data length off by %d bytes') % dd)
2555 yield revlogproblem(error=_('data length off by %d bytes') % dd)
2552 if di:
2556 if di:
2553 yield revlogproblem(error=_('index contains %d extra bytes') % di)
2557 yield revlogproblem(error=_('index contains %d extra bytes') % di)
2554
2558
2555 version = self.version & 0xFFFF
2559 version = self.version & 0xFFFF
2556
2560
2557 # The verifier tells us what version revlog we should be.
2561 # The verifier tells us what version revlog we should be.
2558 if version != state['expectedversion']:
2562 if version != state['expectedversion']:
2559 yield revlogproblem(
2563 yield revlogproblem(
2560 warning=_("warning: '%s' uses revlog format %d; expected %d") %
2564 warning=_("warning: '%s' uses revlog format %d; expected %d") %
2561 (self.indexfile, version, state['expectedversion']))
2565 (self.indexfile, version, state['expectedversion']))
2562
2566
2563 state['skipread'] = set()
2567 state['skipread'] = set()
2564
2568
2565 for rev in self:
2569 for rev in self:
2566 node = self.node(rev)
2570 node = self.node(rev)
2567
2571
2568 # Verify contents. 4 cases to care about:
2572 # Verify contents. 4 cases to care about:
2569 #
2573 #
2570 # common: the most common case
2574 # common: the most common case
2571 # rename: with a rename
2575 # rename: with a rename
2572 # meta: file content starts with b'\1\n', the metadata
2576 # meta: file content starts with b'\1\n', the metadata
2573 # header defined in filelog.py, but without a rename
2577 # header defined in filelog.py, but without a rename
2574 # ext: content stored externally
2578 # ext: content stored externally
2575 #
2579 #
2576 # More formally, their differences are shown below:
2580 # More formally, their differences are shown below:
2577 #
2581 #
2578 # | common | rename | meta | ext
2582 # | common | rename | meta | ext
2579 # -------------------------------------------------------
2583 # -------------------------------------------------------
2580 # flags() | 0 | 0 | 0 | not 0
2584 # flags() | 0 | 0 | 0 | not 0
2581 # renamed() | False | True | False | ?
2585 # renamed() | False | True | False | ?
2582 # rawtext[0:2]=='\1\n'| False | True | True | ?
2586 # rawtext[0:2]=='\1\n'| False | True | True | ?
2583 #
2587 #
2584 # "rawtext" means the raw text stored in revlog data, which
2588 # "rawtext" means the raw text stored in revlog data, which
2585 # could be retrieved by "revision(rev, raw=True)". "text"
2589 # could be retrieved by "revision(rev, raw=True)". "text"
2586 # mentioned below is "revision(rev, raw=False)".
2590 # mentioned below is "revision(rev, raw=False)".
2587 #
2591 #
2588 # There are 3 different lengths stored physically:
2592 # There are 3 different lengths stored physically:
2589 # 1. L1: rawsize, stored in revlog index
2593 # 1. L1: rawsize, stored in revlog index
2590 # 2. L2: len(rawtext), stored in revlog data
2594 # 2. L2: len(rawtext), stored in revlog data
2591 # 3. L3: len(text), stored in revlog data if flags==0, or
2595 # 3. L3: len(text), stored in revlog data if flags==0, or
2592 # possibly somewhere else if flags!=0
2596 # possibly somewhere else if flags!=0
2593 #
2597 #
2594 # L1 should be equal to L2. L3 could be different from them.
2598 # L1 should be equal to L2. L3 could be different from them.
2595 # "text" may or may not affect commit hash depending on flag
2599 # "text" may or may not affect commit hash depending on flag
2596 # processors (see revlog.addflagprocessor).
2600 # processors (see revlog.addflagprocessor).
2597 #
2601 #
2598 # | common | rename | meta | ext
2602 # | common | rename | meta | ext
2599 # -------------------------------------------------
2603 # -------------------------------------------------
2600 # rawsize() | L1 | L1 | L1 | L1
2604 # rawsize() | L1 | L1 | L1 | L1
2601 # size() | L1 | L2-LM | L1(*) | L1 (?)
2605 # size() | L1 | L2-LM | L1(*) | L1 (?)
2602 # len(rawtext) | L2 | L2 | L2 | L2
2606 # len(rawtext) | L2 | L2 | L2 | L2
2603 # len(text) | L2 | L2 | L2 | L3
2607 # len(text) | L2 | L2 | L2 | L3
2604 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
2608 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
2605 #
2609 #
2606 # LM: length of metadata, depending on rawtext
2610 # LM: length of metadata, depending on rawtext
2607 # (*): not ideal, see comment in filelog.size
2611 # (*): not ideal, see comment in filelog.size
2608 # (?): could be "- len(meta)" if the resolved content has
2612 # (?): could be "- len(meta)" if the resolved content has
2609 # rename metadata
2613 # rename metadata
2610 #
2614 #
2611 # Checks needed to be done:
2615 # Checks needed to be done:
2612 # 1. length check: L1 == L2, in all cases.
2616 # 1. length check: L1 == L2, in all cases.
2613 # 2. hash check: depending on flag processor, we may need to
2617 # 2. hash check: depending on flag processor, we may need to
2614 # use either "text" (external), or "rawtext" (in revlog).
2618 # use either "text" (external), or "rawtext" (in revlog).
2615
2619
2616 try:
2620 try:
2617 skipflags = state.get('skipflags', 0)
2621 skipflags = state.get('skipflags', 0)
2618 if skipflags:
2622 if skipflags:
2619 skipflags &= self.flags(rev)
2623 skipflags &= self.flags(rev)
2620
2624
2621 if skipflags:
2625 if skipflags:
2622 state['skipread'].add(node)
2626 state['skipread'].add(node)
2623 else:
2627 else:
2624 # Side-effect: read content and verify hash.
2628 # Side-effect: read content and verify hash.
2625 self.revision(node)
2629 self.revision(node)
2626
2630
2627 l1 = self.rawsize(rev)
2631 l1 = self.rawsize(rev)
2628 l2 = len(self.revision(node, raw=True))
2632 l2 = len(self.revision(node, raw=True))
2629
2633
2630 if l1 != l2:
2634 if l1 != l2:
2631 yield revlogproblem(
2635 yield revlogproblem(
2632 error=_('unpacked size is %d, %d expected') % (l2, l1),
2636 error=_('unpacked size is %d, %d expected') % (l2, l1),
2633 node=node)
2637 node=node)
2634
2638
2635 except error.CensoredNodeError:
2639 except error.CensoredNodeError:
2636 if state['erroroncensored']:
2640 if state['erroroncensored']:
2637 yield revlogproblem(error=_('censored file data'),
2641 yield revlogproblem(error=_('censored file data'),
2638 node=node)
2642 node=node)
2639 state['skipread'].add(node)
2643 state['skipread'].add(node)
2640 except Exception as e:
2644 except Exception as e:
2641 yield revlogproblem(
2645 yield revlogproblem(
2642 error=_('unpacking %s: %s') % (short(node),
2646 error=_('unpacking %s: %s') % (short(node),
2643 stringutil.forcebytestr(e)),
2647 stringutil.forcebytestr(e)),
2644 node=node)
2648 node=node)
2645 state['skipread'].add(node)
2649 state['skipread'].add(node)
2646
2650
2647 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
2651 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
2648 revisionscount=False, trackedsize=False,
2652 revisionscount=False, trackedsize=False,
2649 storedsize=False):
2653 storedsize=False):
2650 d = {}
2654 d = {}
2651
2655
2652 if exclusivefiles:
2656 if exclusivefiles:
2653 d['exclusivefiles'] = [(self.opener, self.indexfile)]
2657 d['exclusivefiles'] = [(self.opener, self.indexfile)]
2654 if not self._inline:
2658 if not self._inline:
2655 d['exclusivefiles'].append((self.opener, self.datafile))
2659 d['exclusivefiles'].append((self.opener, self.datafile))
2656
2660
2657 if sharedfiles:
2661 if sharedfiles:
2658 d['sharedfiles'] = []
2662 d['sharedfiles'] = []
2659
2663
2660 if revisionscount:
2664 if revisionscount:
2661 d['revisionscount'] = len(self)
2665 d['revisionscount'] = len(self)
2662
2666
2663 if trackedsize:
2667 if trackedsize:
2664 d['trackedsize'] = sum(map(self.rawsize, iter(self)))
2668 d['trackedsize'] = sum(map(self.rawsize, iter(self)))
2665
2669
2666 if storedsize:
2670 if storedsize:
2667 d['storedsize'] = sum(self.opener.stat(path).st_size
2671 d['storedsize'] = sum(self.opener.stat(path).st_size
2668 for path in self.files())
2672 for path in self.files())
2669
2673
2670 return d
2674 return d
@@ -1,757 +1,760 b''
1 # compression.py - Mercurial utility functions for compression
1 # compression.py - Mercurial utility functions for compression
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6
6
7 from __future__ import absolute_import, print_function
7 from __future__ import absolute_import, print_function
8
8
9 import bz2
9 import bz2
10 import collections
10 import collections
11 import zlib
11 import zlib
12
12
13 from .. import (
13 from .. import (
14 error,
14 error,
15 i18n,
15 i18n,
16 pycompat,
16 pycompat,
17 )
17 )
18 from . import (
18 from . import (
19 stringutil,
19 stringutil,
20 )
20 )
21
21
22 safehasattr = pycompat.safehasattr
22 safehasattr = pycompat.safehasattr
23
23
24
24
25 _ = i18n._
25 _ = i18n._
26
26
27 # compression code
27 # compression code
28
28
29 SERVERROLE = 'server'
29 SERVERROLE = 'server'
30 CLIENTROLE = 'client'
30 CLIENTROLE = 'client'
31
31
32 compewireprotosupport = collections.namedtuple(r'compenginewireprotosupport',
32 compewireprotosupport = collections.namedtuple(r'compenginewireprotosupport',
33 (r'name', r'serverpriority',
33 (r'name', r'serverpriority',
34 r'clientpriority'))
34 r'clientpriority'))
35
35
36 class propertycache(object):
36 class propertycache(object):
37 def __init__(self, func):
37 def __init__(self, func):
38 self.func = func
38 self.func = func
39 self.name = func.__name__
39 self.name = func.__name__
40 def __get__(self, obj, type=None):
40 def __get__(self, obj, type=None):
41 result = self.func(obj)
41 result = self.func(obj)
42 self.cachevalue(obj, result)
42 self.cachevalue(obj, result)
43 return result
43 return result
44
44
45 def cachevalue(self, obj, value):
45 def cachevalue(self, obj, value):
46 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
46 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
47 obj.__dict__[self.name] = value
47 obj.__dict__[self.name] = value
48
48
49 class compressormanager(object):
49 class compressormanager(object):
50 """Holds registrations of various compression engines.
50 """Holds registrations of various compression engines.
51
51
52 This class essentially abstracts the differences between compression
52 This class essentially abstracts the differences between compression
53 engines to allow new compression formats to be added easily, possibly from
53 engines to allow new compression formats to be added easily, possibly from
54 extensions.
54 extensions.
55
55
56 Compressors are registered against the global instance by calling its
56 Compressors are registered against the global instance by calling its
57 ``register()`` method.
57 ``register()`` method.
58 """
58 """
59 def __init__(self):
59 def __init__(self):
60 self._engines = {}
60 self._engines = {}
61 # Bundle spec human name to engine name.
61 # Bundle spec human name to engine name.
62 self._bundlenames = {}
62 self._bundlenames = {}
63 # Internal bundle identifier to engine name.
63 # Internal bundle identifier to engine name.
64 self._bundletypes = {}
64 self._bundletypes = {}
65 # Revlog header to engine name.
65 # Revlog header to engine name.
66 self._revlogheaders = {}
66 self._revlogheaders = {}
67 # Wire proto identifier to engine name.
67 # Wire proto identifier to engine name.
68 self._wiretypes = {}
68 self._wiretypes = {}
69
69
70 def __getitem__(self, key):
70 def __getitem__(self, key):
71 return self._engines[key]
71 return self._engines[key]
72
72
73 def __contains__(self, key):
73 def __contains__(self, key):
74 return key in self._engines
74 return key in self._engines
75
75
76 def __iter__(self):
76 def __iter__(self):
77 return iter(self._engines.keys())
77 return iter(self._engines.keys())
78
78
79 def register(self, engine):
79 def register(self, engine):
80 """Register a compression engine with the manager.
80 """Register a compression engine with the manager.
81
81
82 The argument must be a ``compressionengine`` instance.
82 The argument must be a ``compressionengine`` instance.
83 """
83 """
84 if not isinstance(engine, compressionengine):
84 if not isinstance(engine, compressionengine):
85 raise ValueError(_('argument must be a compressionengine'))
85 raise ValueError(_('argument must be a compressionengine'))
86
86
87 name = engine.name()
87 name = engine.name()
88
88
89 if name in self._engines:
89 if name in self._engines:
90 raise error.Abort(_('compression engine %s already registered') %
90 raise error.Abort(_('compression engine %s already registered') %
91 name)
91 name)
92
92
93 bundleinfo = engine.bundletype()
93 bundleinfo = engine.bundletype()
94 if bundleinfo:
94 if bundleinfo:
95 bundlename, bundletype = bundleinfo
95 bundlename, bundletype = bundleinfo
96
96
97 if bundlename in self._bundlenames:
97 if bundlename in self._bundlenames:
98 raise error.Abort(_('bundle name %s already registered') %
98 raise error.Abort(_('bundle name %s already registered') %
99 bundlename)
99 bundlename)
100 if bundletype in self._bundletypes:
100 if bundletype in self._bundletypes:
101 raise error.Abort(_('bundle type %s already registered by %s') %
101 raise error.Abort(_('bundle type %s already registered by %s') %
102 (bundletype, self._bundletypes[bundletype]))
102 (bundletype, self._bundletypes[bundletype]))
103
103
104 # No external facing name declared.
104 # No external facing name declared.
105 if bundlename:
105 if bundlename:
106 self._bundlenames[bundlename] = name
106 self._bundlenames[bundlename] = name
107
107
108 self._bundletypes[bundletype] = name
108 self._bundletypes[bundletype] = name
109
109
110 wiresupport = engine.wireprotosupport()
110 wiresupport = engine.wireprotosupport()
111 if wiresupport:
111 if wiresupport:
112 wiretype = wiresupport.name
112 wiretype = wiresupport.name
113 if wiretype in self._wiretypes:
113 if wiretype in self._wiretypes:
114 raise error.Abort(_('wire protocol compression %s already '
114 raise error.Abort(_('wire protocol compression %s already '
115 'registered by %s') %
115 'registered by %s') %
116 (wiretype, self._wiretypes[wiretype]))
116 (wiretype, self._wiretypes[wiretype]))
117
117
118 self._wiretypes[wiretype] = name
118 self._wiretypes[wiretype] = name
119
119
120 revlogheader = engine.revlogheader()
120 revlogheader = engine.revlogheader()
121 if revlogheader and revlogheader in self._revlogheaders:
121 if revlogheader and revlogheader in self._revlogheaders:
122 raise error.Abort(_('revlog header %s already registered by %s') %
122 raise error.Abort(_('revlog header %s already registered by %s') %
123 (revlogheader, self._revlogheaders[revlogheader]))
123 (revlogheader, self._revlogheaders[revlogheader]))
124
124
125 if revlogheader:
125 if revlogheader:
126 self._revlogheaders[revlogheader] = name
126 self._revlogheaders[revlogheader] = name
127
127
128 self._engines[name] = engine
128 self._engines[name] = engine
129
129
130 @property
130 @property
131 def supportedbundlenames(self):
131 def supportedbundlenames(self):
132 return set(self._bundlenames.keys())
132 return set(self._bundlenames.keys())
133
133
134 @property
134 @property
135 def supportedbundletypes(self):
135 def supportedbundletypes(self):
136 return set(self._bundletypes.keys())
136 return set(self._bundletypes.keys())
137
137
138 def forbundlename(self, bundlename):
138 def forbundlename(self, bundlename):
139 """Obtain a compression engine registered to a bundle name.
139 """Obtain a compression engine registered to a bundle name.
140
140
141 Will raise KeyError if the bundle type isn't registered.
141 Will raise KeyError if the bundle type isn't registered.
142
142
143 Will abort if the engine is known but not available.
143 Will abort if the engine is known but not available.
144 """
144 """
145 engine = self._engines[self._bundlenames[bundlename]]
145 engine = self._engines[self._bundlenames[bundlename]]
146 if not engine.available():
146 if not engine.available():
147 raise error.Abort(_('compression engine %s could not be loaded') %
147 raise error.Abort(_('compression engine %s could not be loaded') %
148 engine.name())
148 engine.name())
149 return engine
149 return engine
150
150
151 def forbundletype(self, bundletype):
151 def forbundletype(self, bundletype):
152 """Obtain a compression engine registered to a bundle type.
152 """Obtain a compression engine registered to a bundle type.
153
153
154 Will raise KeyError if the bundle type isn't registered.
154 Will raise KeyError if the bundle type isn't registered.
155
155
156 Will abort if the engine is known but not available.
156 Will abort if the engine is known but not available.
157 """
157 """
158 engine = self._engines[self._bundletypes[bundletype]]
158 engine = self._engines[self._bundletypes[bundletype]]
159 if not engine.available():
159 if not engine.available():
160 raise error.Abort(_('compression engine %s could not be loaded') %
160 raise error.Abort(_('compression engine %s could not be loaded') %
161 engine.name())
161 engine.name())
162 return engine
162 return engine
163
163
164 def supportedwireengines(self, role, onlyavailable=True):
164 def supportedwireengines(self, role, onlyavailable=True):
165 """Obtain compression engines that support the wire protocol.
165 """Obtain compression engines that support the wire protocol.
166
166
167 Returns a list of engines in prioritized order, most desired first.
167 Returns a list of engines in prioritized order, most desired first.
168
168
169 If ``onlyavailable`` is set, filter out engines that can't be
169 If ``onlyavailable`` is set, filter out engines that can't be
170 loaded.
170 loaded.
171 """
171 """
172 assert role in (SERVERROLE, CLIENTROLE)
172 assert role in (SERVERROLE, CLIENTROLE)
173
173
174 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
174 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
175
175
176 engines = [self._engines[e] for e in self._wiretypes.values()]
176 engines = [self._engines[e] for e in self._wiretypes.values()]
177 if onlyavailable:
177 if onlyavailable:
178 engines = [e for e in engines if e.available()]
178 engines = [e for e in engines if e.available()]
179
179
180 def getkey(e):
180 def getkey(e):
181 # Sort first by priority, highest first. In case of tie, sort
181 # Sort first by priority, highest first. In case of tie, sort
182 # alphabetically. This is arbitrary, but ensures output is
182 # alphabetically. This is arbitrary, but ensures output is
183 # stable.
183 # stable.
184 w = e.wireprotosupport()
184 w = e.wireprotosupport()
185 return -1 * getattr(w, attr), w.name
185 return -1 * getattr(w, attr), w.name
186
186
187 return list(sorted(engines, key=getkey))
187 return list(sorted(engines, key=getkey))
188
188
189 def forwiretype(self, wiretype):
189 def forwiretype(self, wiretype):
190 engine = self._engines[self._wiretypes[wiretype]]
190 engine = self._engines[self._wiretypes[wiretype]]
191 if not engine.available():
191 if not engine.available():
192 raise error.Abort(_('compression engine %s could not be loaded') %
192 raise error.Abort(_('compression engine %s could not be loaded') %
193 engine.name())
193 engine.name())
194 return engine
194 return engine
195
195
196 def forrevlogheader(self, header):
196 def forrevlogheader(self, header):
197 """Obtain a compression engine registered to a revlog header.
197 """Obtain a compression engine registered to a revlog header.
198
198
199 Will raise KeyError if the revlog header value isn't registered.
199 Will raise KeyError if the revlog header value isn't registered.
200 """
200 """
201 return self._engines[self._revlogheaders[header]]
201 return self._engines[self._revlogheaders[header]]
202
202
203 compengines = compressormanager()
203 compengines = compressormanager()
204
204
205 class compressionengine(object):
205 class compressionengine(object):
206 """Base class for compression engines.
206 """Base class for compression engines.
207
207
208 Compression engines must implement the interface defined by this class.
208 Compression engines must implement the interface defined by this class.
209 """
209 """
210 def name(self):
210 def name(self):
211 """Returns the name of the compression engine.
211 """Returns the name of the compression engine.
212
212
213 This is the key the engine is registered under.
213 This is the key the engine is registered under.
214
214
215 This method must be implemented.
215 This method must be implemented.
216 """
216 """
217 raise NotImplementedError()
217 raise NotImplementedError()
218
218
219 def available(self):
219 def available(self):
220 """Whether the compression engine is available.
220 """Whether the compression engine is available.
221
221
222 The intent of this method is to allow optional compression engines
222 The intent of this method is to allow optional compression engines
223 that may not be available in all installations (such as engines relying
223 that may not be available in all installations (such as engines relying
224 on C extensions that may not be present).
224 on C extensions that may not be present).
225 """
225 """
226 return True
226 return True
227
227
228 def bundletype(self):
228 def bundletype(self):
229 """Describes bundle identifiers for this engine.
229 """Describes bundle identifiers for this engine.
230
230
231 If this compression engine isn't supported for bundles, returns None.
231 If this compression engine isn't supported for bundles, returns None.
232
232
233 If this engine can be used for bundles, returns a 2-tuple of strings of
233 If this engine can be used for bundles, returns a 2-tuple of strings of
234 the user-facing "bundle spec" compression name and an internal
234 the user-facing "bundle spec" compression name and an internal
235 identifier used to denote the compression format within bundles. To
235 identifier used to denote the compression format within bundles. To
236 exclude the name from external usage, set the first element to ``None``.
236 exclude the name from external usage, set the first element to ``None``.
237
237
238 If bundle compression is supported, the class must also implement
238 If bundle compression is supported, the class must also implement
239 ``compressstream`` and `decompressorreader``.
239 ``compressstream`` and `decompressorreader``.
240
240
241 The docstring of this method is used in the help system to tell users
241 The docstring of this method is used in the help system to tell users
242 about this engine.
242 about this engine.
243 """
243 """
244 return None
244 return None
245
245
246 def wireprotosupport(self):
246 def wireprotosupport(self):
247 """Declare support for this compression format on the wire protocol.
247 """Declare support for this compression format on the wire protocol.
248
248
249 If this compression engine isn't supported for compressing wire
249 If this compression engine isn't supported for compressing wire
250 protocol payloads, returns None.
250 protocol payloads, returns None.
251
251
252 Otherwise, returns ``compenginewireprotosupport`` with the following
252 Otherwise, returns ``compenginewireprotosupport`` with the following
253 fields:
253 fields:
254
254
255 * String format identifier
255 * String format identifier
256 * Integer priority for the server
256 * Integer priority for the server
257 * Integer priority for the client
257 * Integer priority for the client
258
258
259 The integer priorities are used to order the advertisement of format
259 The integer priorities are used to order the advertisement of format
260 support by server and client. The highest integer is advertised
260 support by server and client. The highest integer is advertised
261 first. Integers with non-positive values aren't advertised.
261 first. Integers with non-positive values aren't advertised.
262
262
263 The priority values are somewhat arbitrary and only used for default
263 The priority values are somewhat arbitrary and only used for default
264 ordering. The relative order can be changed via config options.
264 ordering. The relative order can be changed via config options.
265
265
266 If wire protocol compression is supported, the class must also implement
266 If wire protocol compression is supported, the class must also implement
267 ``compressstream`` and ``decompressorreader``.
267 ``compressstream`` and ``decompressorreader``.
268 """
268 """
269 return None
269 return None
270
270
271 def revlogheader(self):
271 def revlogheader(self):
272 """Header added to revlog chunks that identifies this engine.
272 """Header added to revlog chunks that identifies this engine.
273
273
274 If this engine can be used to compress revlogs, this method should
274 If this engine can be used to compress revlogs, this method should
275 return the bytes used to identify chunks compressed with this engine.
275 return the bytes used to identify chunks compressed with this engine.
276 Else, the method should return ``None`` to indicate it does not
276 Else, the method should return ``None`` to indicate it does not
277 participate in revlog compression.
277 participate in revlog compression.
278 """
278 """
279 return None
279 return None
280
280
    def compressstream(self, it, opts=None):
        """Compress an iterator of chunks.

        The method receives an iterator (ideally a generator) of chunks of
        bytes to be compressed. It returns an iterator (ideally a generator)
        of bytes of chunks representing the compressed output.

        Optionally accepts an argument defining how to perform compression.
        Each engine treats this argument differently.
        """
        # Abstract: concrete engines advertising wire protocol or bundle
        # support must override this.
        raise NotImplementedError()
292
292
    def decompressorreader(self, fh):
        """Perform decompression on a file object.

        Argument is an object with a ``read(size)`` method that returns
        compressed data. Return value is an object with a ``read(size)`` that
        returns uncompressed data.
        """
        # Abstract: concrete engines must override.
        raise NotImplementedError()
301
301
    def revlogcompressor(self, opts=None):
        """Obtain an object that can be used to compress revlog entries.

        The object has a ``compress(data)`` method that compresses binary
        data. This method returns compressed binary data or ``None`` if
        the data could not be compressed (too small, not compressible, etc).
        The returned data should have a header uniquely identifying this
        compression format so decompression can be routed to this engine.
        This header should be identified by the ``revlogheader()`` return
        value.

        The object has a ``decompress(data)`` method that decompresses
        data. The method will only be called if ``data`` begins with
        ``revlogheader()``. The method should return the raw, uncompressed
        data or raise a ``StorageError``.

        The object is reusable but is not thread safe.
        """
        # Abstract: engines returning a revlogheader() must override.
        raise NotImplementedError()
321
321
class _CompressedStreamReader(object):
    """Present a ``read(size)`` file-like interface over a compressed input.

    Subclasses implement ``_decompress(chunk)`` to feed their decompressor,
    append any produced plaintext to ``self._pending`` and set ``self._eof``
    once the compressed stream is known to be exhausted.
    """
    def __init__(self, fh):
        # Prefer an unbuffered read when the handle offers one so we return
        # data as soon as it is available instead of blocking to fill a
        # buffer.
        if safehasattr(fh, 'unbufferedread'):
            self._reader = fh.unbufferedread
        else:
            self._reader = fh.read
        self._pending = []   # decompressed chunks not yet handed to caller
        self._pos = 0        # consumed offset inside self._pending[0]
        self._eof = False    # set by subclasses at end of compressed stream

    def _decompress(self, chunk):
        # Subclass hook; see class docstring for the required contract.
        raise NotImplementedError()

    def read(self, l):
        """Return up to ``l`` bytes of decompressed data (fewer at EOF)."""
        buf = []
        while True:
            while self._pending:
                if len(self._pending[0]) > l + self._pos:
                    # First pending chunk alone satisfies the request:
                    # slice it and remember how far into it we consumed.
                    newbuf = self._pending[0]
                    buf.append(newbuf[self._pos:self._pos + l])
                    self._pos += l
                    return ''.join(buf)

                # Otherwise consume the first pending chunk entirely and
                # reduce the outstanding byte count accordingly.
                newbuf = self._pending.pop(0)
                if self._pos:
                    buf.append(newbuf[self._pos:])
                    l -= len(newbuf) - self._pos
                else:
                    buf.append(newbuf)
                    l -= len(newbuf)
                self._pos = 0

            if self._eof:
                return ''.join(buf)
            chunk = self._reader(65536)
            self._decompress(chunk)
            if not chunk and not self._pending and not self._eof:
                # No progress and no new data, bail out
                return ''.join(buf)
361
361
class _GzipCompressedStreamReader(_CompressedStreamReader):
    """Stream reader for zlib (DEFLATE) compressed input."""
    def __init__(self, fh):
        super(_GzipCompressedStreamReader, self).__init__(fh)
        self._decompobj = zlib.decompressobj()
    def _decompress(self, chunk):
        newbuf = self._decompobj.decompress(chunk)
        if newbuf:
            self._pending.append(newbuf)
        # The zlib decompressor exposes no direct EOF flag, so probe a copy
        # with a sentinel byte: once the stream has ended, extra input is
        # reported back verbatim in ``unused_data``.
        d = self._decompobj.copy()
        try:
            d.decompress('x')
            d.flush()
            if d.unused_data == 'x':
                self._eof = True
        except zlib.error:
            # Probe failed mid-stream; not at EOF yet.
            pass
378
378
class _BZ2CompressedStreamReader(_CompressedStreamReader):
    """Stream reader for bzip2 compressed input."""
    def __init__(self, fh):
        super(_BZ2CompressedStreamReader, self).__init__(fh)
        self._decompobj = bz2.BZ2Decompressor()
    def _decompress(self, chunk):
        newbuf = self._decompobj.decompress(chunk)
        if newbuf:
            self._pending.append(newbuf)
        try:
            # Drain any output still buffered in the decompressor;
            # BZ2Decompressor raises EOFError once fed data past the end
            # of the compressed stream, which is our EOF signal.
            while True:
                newbuf = self._decompobj.decompress('')
                if newbuf:
                    self._pending.append(newbuf)
                else:
                    break
        except EOFError:
            self._eof = True
396
396
class _TruncatedBZ2CompressedStreamReader(_BZ2CompressedStreamReader):
    """Reader for bzip2 streams whose leading ``BZ`` magic was stripped.

    Used for the legacy ``_truncatedBZ`` format: the missing header bytes
    are re-injected into the decompressor before real data arrives.
    """
    def __init__(self, fh):
        super(_TruncatedBZ2CompressedStreamReader, self).__init__(fh)
        newbuf = self._decompobj.decompress('BZ')
        if newbuf:
            self._pending.append(newbuf)
403
403
class _ZstdCompressedStreamReader(_CompressedStreamReader):
    """Stream reader for zstandard compressed input.

    The ``zstd`` module object is injected by the caller because the
    module may not be installed everywhere and is imported lazily.
    """
    def __init__(self, fh, zstd):
        super(_ZstdCompressedStreamReader, self).__init__(fh)
        self._zstd = zstd
        self._decompobj = zstd.ZstdDecompressor().decompressobj()
    def _decompress(self, chunk):
        newbuf = self._decompobj.decompress(chunk)
        if newbuf:
            self._pending.append(newbuf)
        try:
            # Drain buffered output; feeding data past the end of the
            # frame raises ZstdError, which is our EOF signal.
            while True:
                newbuf = self._decompobj.decompress('')
                if newbuf:
                    self._pending.append(newbuf)
                else:
                    break
        except self._zstd.ZstdError:
            self._eof = True
422
422
class _zlibengine(compressionengine):
    """zlib/DEFLATE compression engine (revlog header ``x``)."""

    def name(self):
        return 'zlib'

    def bundletype(self):
        """zlib compression using the DEFLATE algorithm.

        All Mercurial clients should support this format. The compression
        algorithm strikes a reasonable balance between compression ratio
        and size.
        """
        return 'gzip', 'GZ'

    def wireprotosupport(self):
        return compewireprotosupport('zlib', 20, 20)

    def revlogheader(self):
        return 'x'

    def compressstream(self, it, opts=None):
        opts = opts or {}

        # -1 selects zlib's default compression level.
        z = zlib.compressobj(opts.get('level', -1))
        for chunk in it:
            data = z.compress(chunk)
            # Not all calls to compress emit data. It is cheaper to inspect
            # here than to feed empty chunks through generator.
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        return _GzipCompressedStreamReader(fh)

    class zlibrevlogcompressor(object):
        """Revlog compressor backed by zlib.

        ``level`` of None falls back to zlib's built-in default level.
        """

        def __init__(self, level=None):
            self._level = level

        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 44:
                # Too small to ever win over storing uncompressed.
                return None

            elif insize <= 1000000:
                if self._level is None:
                    compressed = zlib.compress(data)
                else:
                    compressed = zlib.compress(data, self._level)
                if len(compressed) < insize:
                    return compressed
                # Compression did not shrink the data; store raw instead.
                return None

            # zlib makes an internal copy of the input buffer, doubling
            # memory usage for large inputs. So do streaming compression
            # on large inputs.
            else:
                if self._level is None:
                    z = zlib.compressobj()
                else:
                    z = zlib.compressobj(level=self._level)
                parts = []
                pos = 0
                while pos < insize:
                    # Feed the compressor in 1 MiB slices.
                    pos2 = pos + 2**20
                    parts.append(z.compress(data[pos:pos2]))
                    pos = pos2
                parts.append(z.flush())

                if sum(map(len, parts)) < insize:
                    return ''.join(parts)
                return None

        def decompress(self, data):
            try:
                return zlib.decompress(data)
            except zlib.error as e:
                raise error.StorageError(_('revlog decompress error: %s') %
                                         stringutil.forcebytestr(e))

    def revlogcompressor(self, opts=None):
        # Honor the per-repo zlib compression level (plumbed through revlog
        # opts from the storage.revlog.zlib.level configuration) when the
        # caller supplied options.
        level = None
        if opts is not None:
            level = opts.get('zlib.level')
        return self.zlibrevlogcompressor(level)

compengines.register(_zlibengine())
511
514
class _bz2engine(compressionengine):
    """bzip2 compression engine (bundles and opt-in wire protocol)."""

    def name(self):
        return 'bz2'

    def bundletype(self):
        """An algorithm that produces smaller bundles than ``gzip``.

        All Mercurial clients should support this format.

        This engine will likely produce smaller bundles than ``gzip`` but
        will be significantly slower, both during compression and
        decompression.

        If available, the ``zstd`` engine can yield similar or better
        compression at much higher speeds.
        """
        return 'bzip2', 'BZ'

    # We declare a protocol name but don't advertise by default because
    # it is slow.
    def wireprotosupport(self):
        return compewireprotosupport('bzip2', 0, 0)

    def compressstream(self, it, opts=None):
        opts = opts or {}
        # 9 is bzip2's maximum (and its conventional default) level.
        z = bz2.BZ2Compressor(opts.get('level', 9))
        for chunk in it:
            data = z.compress(chunk)
            # The compressor may buffer input without emitting output.
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        return _BZ2CompressedStreamReader(fh)

compengines.register(_bz2engine())
549
552
class _truncatedbz2engine(compressionengine):
    """Decompression-only engine for bzip2 data missing its 'BZ' magic."""

    def name(self):
        return 'bz2truncated'

    def bundletype(self):
        # No user-facing bundle spec name; only reachable through the
        # internal '_truncatedBZ' header (legacy format support).
        return None, '_truncatedBZ'

    # We don't implement compressstream because it is hackily handled elsewhere.

    def decompressorreader(self, fh):
        return _TruncatedBZ2CompressedStreamReader(fh)

compengines.register(_truncatedbz2engine())
563
566
class _noopengine(compressionengine):
    """Pass-through engine: no compression is applied anywhere."""

    def name(self):
        return 'none'

    def bundletype(self):
        """No compression is performed.

        Use this compression engine to explicitly disable compression.
        """
        return 'none', 'UN'

    # Clients always support uncompressed payloads. Servers don't because
    # unless you are on a fast network, uncompressed payloads can easily
    # saturate your network pipe.
    def wireprotosupport(self):
        return compewireprotosupport('none', 0, 10)

    # We don't implement revlogheader because it is handled specially
    # in the revlog class.

    def compressstream(self, it, opts=None):
        # Identity: the input iterator is the "compressed" output.
        return it

    def decompressorreader(self, fh):
        return fh

    class nooprevlogcompressor(object):
        def compress(self, data):
            # Returning None tells the revlog to store the chunk raw.
            return None

    def revlogcompressor(self, opts=None):
        return self.nooprevlogcompressor()

compengines.register(_noopengine())
598
601
class _zstdengine(compressionengine):
    """zstandard compression engine (revlog header ``\\x28``).

    Only available when the bundled ``zstd`` module can be imported.
    """

    def name(self):
        return 'zstd'

    @propertycache
    def _module(self):
        # Not all installs have the zstd module available. So defer importing
        # until first access.
        try:
            from .. import zstd
            # Force delayed import.
            zstd.__version__
            return zstd
        except ImportError:
            return None

    def available(self):
        return bool(self._module)

    def bundletype(self):
        """A modern compression algorithm that is fast and highly flexible.

        Only supported by Mercurial 4.1 and newer clients.

        With the default settings, zstd compression is both faster and yields
        better compression than ``gzip``. It also frequently yields better
        compression than ``bzip2`` while operating at much higher speeds.

        If this engine is available and backwards compatibility is not a
        concern, it is likely the best available engine.
        """
        return 'zstd', 'ZS'

    def wireprotosupport(self):
        return compewireprotosupport('zstd', 50, 50)

    def revlogheader(self):
        return '\x28'

    def compressstream(self, it, opts=None):
        opts = opts or {}
        # zstd level 3 is almost always significantly faster than zlib
        # while providing no worse compression. It strikes a good balance
        # between speed and compression.
        level = opts.get('level', 3)

        zstd = self._module
        z = zstd.ZstdCompressor(level=level).compressobj()
        for chunk in it:
            data = z.compress(chunk)
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        return _ZstdCompressedStreamReader(fh, self._module)

    class zstdrevlogcompressor(object):
        def __init__(self, zstd, level=3):
            # TODO consider omitting frame magic to save 4 bytes.
            # This writes content sizes into the frame header. That is
            # extra storage. But it allows a correct size memory allocation
            # to hold the result.
            self._cctx = zstd.ZstdCompressor(level=level)
            self._dctx = zstd.ZstdDecompressor()
            self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
            self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE

        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 50:
                # Too small to ever win over storing uncompressed.
                return None

            elif insize <= 1000000:
                compressed = self._cctx.compress(data)
                if len(compressed) < insize:
                    return compressed
                # No size win; caller stores the chunk raw.
                return None
            else:
                # Stream large inputs to bound memory usage.
                z = self._cctx.compressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._compinsize
                    chunk = z.compress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                chunks.append(z.flush())

                if sum(map(len, chunks)) < insize:
                    return ''.join(chunks)
                return None

        def decompress(self, data):
            insize = len(data)

            try:
                # This was measured to be faster than other streaming
                # decompressors.
                dobj = self._dctx.decompressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._decompinsize
                    chunk = dobj.decompress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                # Frame should be exhausted, so no finish() API.

                return ''.join(chunks)
            except Exception as e:
                raise error.StorageError(_('revlog decompress error: %s') %
                                         stringutil.forcebytestr(e))

    def revlogcompressor(self, opts=None):
        opts = opts or {}
        return self.zstdrevlogcompressor(self._module,
                                         level=opts.get('level', 3))

compengines.register(_zstdengine())
725
728
def bundlecompressiontopics():
    """Obtains a list of available bundle compressions for use in help."""
    # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
    items = {}

    # We need to format the docstring. So use a dummy object/type to hold it
    # rather than mutating the original.
    class docobject(object):
        pass

    for name in compengines:
        engine = compengines[name]

        # Skip engines whose backing module is missing (e.g. zstd).
        if not engine.available():
            continue

        bt = engine.bundletype()
        # Engines with no user-facing bundle spec name (e.g. the truncated
        # bz2 engine) are excluded from help output.
        if not bt or not bt[0]:
            continue

        doc = b'``%s``\n    %s' % (bt[0], pycompat.getdoc(engine.bundletype))

        value = docobject()
        value.__doc__ = pycompat.sysstr(doc)
        value._origdoc = engine.bundletype.__doc__
        value._origfunc = engine.bundletype

        items[bt[0]] = value

    return items

# Expose the docstrings to the translation machinery.
i18nfunctions = bundlecompressiontopics().values()
@@ -1,84 +1,140 b''
1 A new repository uses zlib storage, which doesn't need a requirement
1 A new repository uses zlib storage, which doesn't need a requirement
2
2
3 $ hg init default
3 $ hg init default
4 $ cd default
4 $ cd default
5 $ cat .hg/requires
5 $ cat .hg/requires
6 dotencode
6 dotencode
7 fncache
7 fncache
8 generaldelta
8 generaldelta
9 revlogv1
9 revlogv1
10 sparserevlog
10 sparserevlog
11 store
11 store
12 testonly-simplestore (reposimplestore !)
12 testonly-simplestore (reposimplestore !)
13
13
14 $ touch foo
14 $ touch foo
15 $ hg -q commit -A -m 'initial commit with a lot of repeated repeated repeated text to trigger compression'
15 $ hg -q commit -A -m 'initial commit with a lot of repeated repeated repeated text to trigger compression'
16 $ hg debugrevlog -c | grep 0x78
16 $ hg debugrevlog -c | grep 0x78
17 0x78 (x) : 1 (100.00%)
17 0x78 (x) : 1 (100.00%)
18 0x78 (x) : 110 (100.00%)
18 0x78 (x) : 110 (100.00%)
19
19
20 $ cd ..
20 $ cd ..
21
21
22 Unknown compression engine to format.compression aborts
22 Unknown compression engine to format.compression aborts
23
23
24 $ hg --config experimental.format.compression=unknown init unknown
24 $ hg --config experimental.format.compression=unknown init unknown
25 abort: compression engine unknown defined by experimental.format.compression not available
25 abort: compression engine unknown defined by experimental.format.compression not available
26 (run "hg debuginstall" to list available compression engines)
26 (run "hg debuginstall" to list available compression engines)
27 [255]
27 [255]
28
28
29 A requirement specifying an unknown compression engine results in bail
29 A requirement specifying an unknown compression engine results in bail
30
30
31 $ hg init unknownrequirement
31 $ hg init unknownrequirement
32 $ cd unknownrequirement
32 $ cd unknownrequirement
33 $ echo exp-compression-unknown >> .hg/requires
33 $ echo exp-compression-unknown >> .hg/requires
34 $ hg log
34 $ hg log
35 abort: repository requires features unknown to this Mercurial: exp-compression-unknown!
35 abort: repository requires features unknown to this Mercurial: exp-compression-unknown!
36 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
36 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
37 [255]
37 [255]
38
38
39 $ cd ..
39 $ cd ..
40
40
41 #if zstd
41 #if zstd
42
42
43 $ hg --config experimental.format.compression=zstd init zstd
43 $ hg --config experimental.format.compression=zstd init zstd
44 $ cd zstd
44 $ cd zstd
45 $ cat .hg/requires
45 $ cat .hg/requires
46 dotencode
46 dotencode
47 exp-compression-zstd
47 exp-compression-zstd
48 fncache
48 fncache
49 generaldelta
49 generaldelta
50 revlogv1
50 revlogv1
51 sparserevlog
51 sparserevlog
52 store
52 store
53 testonly-simplestore (reposimplestore !)
53 testonly-simplestore (reposimplestore !)
54
54
55 $ touch foo
55 $ touch foo
56 $ hg -q commit -A -m 'initial commit with a lot of repeated repeated repeated text'
56 $ hg -q commit -A -m 'initial commit with a lot of repeated repeated repeated text'
57
57
58 $ hg debugrevlog -c | grep 0x28
58 $ hg debugrevlog -c | grep 0x28
59 0x28 : 1 (100.00%)
59 0x28 : 1 (100.00%)
60 0x28 : 98 (100.00%)
60 0x28 : 98 (100.00%)
61
61
62 $ cd ..
62 $ cd ..
63
63
64 Specifying a new format.compression on an existing repo won't introduce data
64 Specifying a new format.compression on an existing repo won't introduce data
65 with that engine or a requirement
65 with that engine or a requirement
66
66
67 $ cd default
67 $ cd default
68 $ touch bar
68 $ touch bar
69 $ hg --config experimental.format.compression=zstd -q commit -A -m 'add bar with a lot of repeated repeated repeated text'
69 $ hg --config experimental.format.compression=zstd -q commit -A -m 'add bar with a lot of repeated repeated repeated text'
70
70
71 $ cat .hg/requires
71 $ cat .hg/requires
72 dotencode
72 dotencode
73 fncache
73 fncache
74 generaldelta
74 generaldelta
75 revlogv1
75 revlogv1
76 sparserevlog
76 sparserevlog
77 store
77 store
78 testonly-simplestore (reposimplestore !)
78 testonly-simplestore (reposimplestore !)
79
79
80 $ hg debugrevlog -c | grep 0x78
80 $ hg debugrevlog -c | grep 0x78
81 0x78 (x) : 2 (100.00%)
81 0x78 (x) : 2 (100.00%)
82 0x78 (x) : 199 (100.00%)
82 0x78 (x) : 199 (100.00%)
83
83
84 #endif
84 #endif
85
86 checking zlib options
87 =====================
88
89 $ hg init zlib-level-default
90 $ hg init zlib-level-1
91 $ cat << EOF >> zlib-level-1/.hg/hgrc
92 > [storage]
93 > revlog.zlib.level=1
94 > EOF
95 $ hg init zlib-level-9
96 $ cat << EOF >> zlib-level-9/.hg/hgrc
97 > [storage]
98 > revlog.zlib.level=9
99 > EOF
100
101
102 $ commitone() {
103 > repo=$1
104 > cp $RUNTESTDIR/bundles/issue4438-r1.hg $repo/a
105 > hg -R $repo add $repo/a
106 > hg -R $repo commit -m some-commit
107 > }
108
109 $ for repo in zlib-level-default zlib-level-1 zlib-level-9; do
110 > commitone $repo
111 > done
112
113 $ $RUNTESTDIR/f -s */.hg/store/data/*
114 zlib-level-1/.hg/store/data/a.i: size=4146
115 zlib-level-9/.hg/store/data/a.i: size=4138
116 zlib-level-default/.hg/store/data/a.i: size=4138
117
118 Test error cases
119
120 $ hg init zlib-level-invalid
121 $ cat << EOF >> zlib-level-invalid/.hg/hgrc
122 > [storage]
123 > revlog.zlib.level=foobar
124 > EOF
125 $ commitone zlib-level-invalid
126 abort: storage.revlog.zlib.level is not a valid integer ('foobar')
127 abort: storage.revlog.zlib.level is not a valid integer ('foobar')
128 [255]
129
130 $ hg init zlib-level-out-of-range
131 $ cat << EOF >> zlib-level-out-of-range/.hg/hgrc
132 > [storage]
133 > revlog.zlib.level=42
134 > EOF
135
136 $ commitone zlib-level-out-of-range
137 abort: invalid value for `storage.revlog.zlib.level` config: 42
138 abort: invalid value for `storage.revlog.zlib.level` config: 42
139 [255]
140
General Comments 0
You need to be logged in to leave comments. Login now