##// END OF EJS Templates
storage: introduce a `revlog.reuse-external-delta` config...
marmoute -
r41985:688fc33e default
parent child Browse files
Show More
@@ -1,1458 +1,1461 b''
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11 import re
11 import re
12
12
13 from . import (
13 from . import (
14 encoding,
14 encoding,
15 error,
15 error,
16 )
16 )
17
17
18 def loadconfigtable(ui, extname, configtable):
18 def loadconfigtable(ui, extname, configtable):
19 """update config item known to the ui with the extension ones"""
19 """update config item known to the ui with the extension ones"""
20 for section, items in sorted(configtable.items()):
20 for section, items in sorted(configtable.items()):
21 knownitems = ui._knownconfig.setdefault(section, itemregister())
21 knownitems = ui._knownconfig.setdefault(section, itemregister())
22 knownkeys = set(knownitems)
22 knownkeys = set(knownitems)
23 newkeys = set(items)
23 newkeys = set(items)
24 for key in sorted(knownkeys & newkeys):
24 for key in sorted(knownkeys & newkeys):
25 msg = "extension '%s' overwrite config item '%s.%s'"
25 msg = "extension '%s' overwrite config item '%s.%s'"
26 msg %= (extname, section, key)
26 msg %= (extname, section, key)
27 ui.develwarn(msg, config='warn-config')
27 ui.develwarn(msg, config='warn-config')
28
28
29 knownitems.update(items)
29 knownitems.update(items)
30
30
31 class configitem(object):
31 class configitem(object):
32 """represent a known config item
32 """represent a known config item
33
33
34 :section: the official config section where to find this item,
34 :section: the official config section where to find this item,
35 :name: the official name within the section,
35 :name: the official name within the section,
36 :default: default value for this item,
36 :default: default value for this item,
37 :alias: optional list of tuples as alternatives,
37 :alias: optional list of tuples as alternatives,
38 :generic: this is a generic definition, match name using regular expression.
38 :generic: this is a generic definition, match name using regular expression.
39 """
39 """
40
40
41 def __init__(self, section, name, default=None, alias=(),
41 def __init__(self, section, name, default=None, alias=(),
42 generic=False, priority=0):
42 generic=False, priority=0):
43 self.section = section
43 self.section = section
44 self.name = name
44 self.name = name
45 self.default = default
45 self.default = default
46 self.alias = list(alias)
46 self.alias = list(alias)
47 self.generic = generic
47 self.generic = generic
48 self.priority = priority
48 self.priority = priority
49 self._re = None
49 self._re = None
50 if generic:
50 if generic:
51 self._re = re.compile(self.name)
51 self._re = re.compile(self.name)
52
52
53 class itemregister(dict):
53 class itemregister(dict):
54 """A specialized dictionary that can handle wild-card selection"""
54 """A specialized dictionary that can handle wild-card selection"""
55
55
56 def __init__(self):
56 def __init__(self):
57 super(itemregister, self).__init__()
57 super(itemregister, self).__init__()
58 self._generics = set()
58 self._generics = set()
59
59
60 def update(self, other):
60 def update(self, other):
61 super(itemregister, self).update(other)
61 super(itemregister, self).update(other)
62 self._generics.update(other._generics)
62 self._generics.update(other._generics)
63
63
64 def __setitem__(self, key, item):
64 def __setitem__(self, key, item):
65 super(itemregister, self).__setitem__(key, item)
65 super(itemregister, self).__setitem__(key, item)
66 if item.generic:
66 if item.generic:
67 self._generics.add(item)
67 self._generics.add(item)
68
68
69 def get(self, key):
69 def get(self, key):
70 baseitem = super(itemregister, self).get(key)
70 baseitem = super(itemregister, self).get(key)
71 if baseitem is not None and not baseitem.generic:
71 if baseitem is not None and not baseitem.generic:
72 return baseitem
72 return baseitem
73
73
74 # search for a matching generic item
74 # search for a matching generic item
75 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
75 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
76 for item in generics:
76 for item in generics:
77 # we use 'match' instead of 'search' to make the matching simpler
77 # we use 'match' instead of 'search' to make the matching simpler
78 # for people unfamiliar with regular expression. Having the match
78 # for people unfamiliar with regular expression. Having the match
79 # rooted to the start of the string will produce less surprising
79 # rooted to the start of the string will produce less surprising
80 # result for user writing simple regex for sub-attribute.
80 # result for user writing simple regex for sub-attribute.
81 #
81 #
82 # For example using "color\..*" match produces an unsurprising
82 # For example using "color\..*" match produces an unsurprising
83 # result, while using search could suddenly match apparently
83 # result, while using search could suddenly match apparently
84 # unrelated configuration that happens to contains "color."
84 # unrelated configuration that happens to contains "color."
85 # anywhere. This is a tradeoff where we favor requiring ".*" on
85 # anywhere. This is a tradeoff where we favor requiring ".*" on
86 # some match to avoid the need to prefix most pattern with "^".
86 # some match to avoid the need to prefix most pattern with "^".
87 # The "^" seems more error prone.
87 # The "^" seems more error prone.
88 if item._re.match(key):
88 if item._re.match(key):
89 return item
89 return item
90
90
91 return None
91 return None
92
92
93 coreitems = {}
93 coreitems = {}
94
94
95 def _register(configtable, *args, **kwargs):
95 def _register(configtable, *args, **kwargs):
96 item = configitem(*args, **kwargs)
96 item = configitem(*args, **kwargs)
97 section = configtable.setdefault(item.section, itemregister())
97 section = configtable.setdefault(item.section, itemregister())
98 if item.name in section:
98 if item.name in section:
99 msg = "duplicated config item registration for '%s.%s'"
99 msg = "duplicated config item registration for '%s.%s'"
100 raise error.ProgrammingError(msg % (item.section, item.name))
100 raise error.ProgrammingError(msg % (item.section, item.name))
101 section[item.name] = item
101 section[item.name] = item
102
102
103 # special value for case where the default is derived from other values
103 # special value for case where the default is derived from other values
104 dynamicdefault = object()
104 dynamicdefault = object()
105
105
106 # Registering actual config items
106 # Registering actual config items
107
107
108 def getitemregister(configtable):
108 def getitemregister(configtable):
109 f = functools.partial(_register, configtable)
109 f = functools.partial(_register, configtable)
110 # export pseudo enum as configitem.*
110 # export pseudo enum as configitem.*
111 f.dynamicdefault = dynamicdefault
111 f.dynamicdefault = dynamicdefault
112 return f
112 return f
113
113
114 coreconfigitem = getitemregister(coreitems)
114 coreconfigitem = getitemregister(coreitems)
115
115
116 def _registerdiffopts(section, configprefix=''):
116 def _registerdiffopts(section, configprefix=''):
117 coreconfigitem(section, configprefix + 'nodates',
117 coreconfigitem(section, configprefix + 'nodates',
118 default=False,
118 default=False,
119 )
119 )
120 coreconfigitem(section, configprefix + 'showfunc',
120 coreconfigitem(section, configprefix + 'showfunc',
121 default=False,
121 default=False,
122 )
122 )
123 coreconfigitem(section, configprefix + 'unified',
123 coreconfigitem(section, configprefix + 'unified',
124 default=None,
124 default=None,
125 )
125 )
126 coreconfigitem(section, configprefix + 'git',
126 coreconfigitem(section, configprefix + 'git',
127 default=False,
127 default=False,
128 )
128 )
129 coreconfigitem(section, configprefix + 'ignorews',
129 coreconfigitem(section, configprefix + 'ignorews',
130 default=False,
130 default=False,
131 )
131 )
132 coreconfigitem(section, configprefix + 'ignorewsamount',
132 coreconfigitem(section, configprefix + 'ignorewsamount',
133 default=False,
133 default=False,
134 )
134 )
135 coreconfigitem(section, configprefix + 'ignoreblanklines',
135 coreconfigitem(section, configprefix + 'ignoreblanklines',
136 default=False,
136 default=False,
137 )
137 )
138 coreconfigitem(section, configprefix + 'ignorewseol',
138 coreconfigitem(section, configprefix + 'ignorewseol',
139 default=False,
139 default=False,
140 )
140 )
141 coreconfigitem(section, configprefix + 'nobinary',
141 coreconfigitem(section, configprefix + 'nobinary',
142 default=False,
142 default=False,
143 )
143 )
144 coreconfigitem(section, configprefix + 'noprefix',
144 coreconfigitem(section, configprefix + 'noprefix',
145 default=False,
145 default=False,
146 )
146 )
147 coreconfigitem(section, configprefix + 'word-diff',
147 coreconfigitem(section, configprefix + 'word-diff',
148 default=False,
148 default=False,
149 )
149 )
150
150
151 coreconfigitem('alias', '.*',
151 coreconfigitem('alias', '.*',
152 default=dynamicdefault,
152 default=dynamicdefault,
153 generic=True,
153 generic=True,
154 )
154 )
155 coreconfigitem('auth', 'cookiefile',
155 coreconfigitem('auth', 'cookiefile',
156 default=None,
156 default=None,
157 )
157 )
158 _registerdiffopts(section='annotate')
158 _registerdiffopts(section='annotate')
159 # bookmarks.pushing: internal hack for discovery
159 # bookmarks.pushing: internal hack for discovery
160 coreconfigitem('bookmarks', 'pushing',
160 coreconfigitem('bookmarks', 'pushing',
161 default=list,
161 default=list,
162 )
162 )
163 # bundle.mainreporoot: internal hack for bundlerepo
163 # bundle.mainreporoot: internal hack for bundlerepo
164 coreconfigitem('bundle', 'mainreporoot',
164 coreconfigitem('bundle', 'mainreporoot',
165 default='',
165 default='',
166 )
166 )
167 coreconfigitem('censor', 'policy',
167 coreconfigitem('censor', 'policy',
168 default='abort',
168 default='abort',
169 )
169 )
170 coreconfigitem('chgserver', 'idletimeout',
170 coreconfigitem('chgserver', 'idletimeout',
171 default=3600,
171 default=3600,
172 )
172 )
173 coreconfigitem('chgserver', 'skiphash',
173 coreconfigitem('chgserver', 'skiphash',
174 default=False,
174 default=False,
175 )
175 )
176 coreconfigitem('cmdserver', 'log',
176 coreconfigitem('cmdserver', 'log',
177 default=None,
177 default=None,
178 )
178 )
179 coreconfigitem('cmdserver', 'max-log-files',
179 coreconfigitem('cmdserver', 'max-log-files',
180 default=7,
180 default=7,
181 )
181 )
182 coreconfigitem('cmdserver', 'max-log-size',
182 coreconfigitem('cmdserver', 'max-log-size',
183 default='1 MB',
183 default='1 MB',
184 )
184 )
185 coreconfigitem('cmdserver', 'max-repo-cache',
185 coreconfigitem('cmdserver', 'max-repo-cache',
186 default=0,
186 default=0,
187 )
187 )
188 coreconfigitem('cmdserver', 'message-encodings',
188 coreconfigitem('cmdserver', 'message-encodings',
189 default=list,
189 default=list,
190 )
190 )
191 coreconfigitem('cmdserver', 'track-log',
191 coreconfigitem('cmdserver', 'track-log',
192 default=lambda: ['chgserver', 'cmdserver', 'repocache'],
192 default=lambda: ['chgserver', 'cmdserver', 'repocache'],
193 )
193 )
194 coreconfigitem('color', '.*',
194 coreconfigitem('color', '.*',
195 default=None,
195 default=None,
196 generic=True,
196 generic=True,
197 )
197 )
198 coreconfigitem('color', 'mode',
198 coreconfigitem('color', 'mode',
199 default='auto',
199 default='auto',
200 )
200 )
201 coreconfigitem('color', 'pagermode',
201 coreconfigitem('color', 'pagermode',
202 default=dynamicdefault,
202 default=dynamicdefault,
203 )
203 )
204 _registerdiffopts(section='commands', configprefix='commit.interactive.')
204 _registerdiffopts(section='commands', configprefix='commit.interactive.')
205 coreconfigitem('commands', 'grep.all-files',
205 coreconfigitem('commands', 'grep.all-files',
206 default=False,
206 default=False,
207 )
207 )
208 coreconfigitem('commands', 'resolve.confirm',
208 coreconfigitem('commands', 'resolve.confirm',
209 default=False,
209 default=False,
210 )
210 )
211 coreconfigitem('commands', 'resolve.explicit-re-merge',
211 coreconfigitem('commands', 'resolve.explicit-re-merge',
212 default=False,
212 default=False,
213 )
213 )
214 coreconfigitem('commands', 'resolve.mark-check',
214 coreconfigitem('commands', 'resolve.mark-check',
215 default='none',
215 default='none',
216 )
216 )
217 _registerdiffopts(section='commands', configprefix='revert.interactive.')
217 _registerdiffopts(section='commands', configprefix='revert.interactive.')
218 coreconfigitem('commands', 'show.aliasprefix',
218 coreconfigitem('commands', 'show.aliasprefix',
219 default=list,
219 default=list,
220 )
220 )
221 coreconfigitem('commands', 'status.relative',
221 coreconfigitem('commands', 'status.relative',
222 default=False,
222 default=False,
223 )
223 )
224 coreconfigitem('commands', 'status.skipstates',
224 coreconfigitem('commands', 'status.skipstates',
225 default=[],
225 default=[],
226 )
226 )
227 coreconfigitem('commands', 'status.terse',
227 coreconfigitem('commands', 'status.terse',
228 default='',
228 default='',
229 )
229 )
230 coreconfigitem('commands', 'status.verbose',
230 coreconfigitem('commands', 'status.verbose',
231 default=False,
231 default=False,
232 )
232 )
233 coreconfigitem('commands', 'update.check',
233 coreconfigitem('commands', 'update.check',
234 default=None,
234 default=None,
235 )
235 )
236 coreconfigitem('commands', 'update.requiredest',
236 coreconfigitem('commands', 'update.requiredest',
237 default=False,
237 default=False,
238 )
238 )
239 coreconfigitem('committemplate', '.*',
239 coreconfigitem('committemplate', '.*',
240 default=None,
240 default=None,
241 generic=True,
241 generic=True,
242 )
242 )
243 coreconfigitem('convert', 'bzr.saverev',
243 coreconfigitem('convert', 'bzr.saverev',
244 default=True,
244 default=True,
245 )
245 )
246 coreconfigitem('convert', 'cvsps.cache',
246 coreconfigitem('convert', 'cvsps.cache',
247 default=True,
247 default=True,
248 )
248 )
249 coreconfigitem('convert', 'cvsps.fuzz',
249 coreconfigitem('convert', 'cvsps.fuzz',
250 default=60,
250 default=60,
251 )
251 )
252 coreconfigitem('convert', 'cvsps.logencoding',
252 coreconfigitem('convert', 'cvsps.logencoding',
253 default=None,
253 default=None,
254 )
254 )
255 coreconfigitem('convert', 'cvsps.mergefrom',
255 coreconfigitem('convert', 'cvsps.mergefrom',
256 default=None,
256 default=None,
257 )
257 )
258 coreconfigitem('convert', 'cvsps.mergeto',
258 coreconfigitem('convert', 'cvsps.mergeto',
259 default=None,
259 default=None,
260 )
260 )
261 coreconfigitem('convert', 'git.committeractions',
261 coreconfigitem('convert', 'git.committeractions',
262 default=lambda: ['messagedifferent'],
262 default=lambda: ['messagedifferent'],
263 )
263 )
264 coreconfigitem('convert', 'git.extrakeys',
264 coreconfigitem('convert', 'git.extrakeys',
265 default=list,
265 default=list,
266 )
266 )
267 coreconfigitem('convert', 'git.findcopiesharder',
267 coreconfigitem('convert', 'git.findcopiesharder',
268 default=False,
268 default=False,
269 )
269 )
270 coreconfigitem('convert', 'git.remoteprefix',
270 coreconfigitem('convert', 'git.remoteprefix',
271 default='remote',
271 default='remote',
272 )
272 )
273 coreconfigitem('convert', 'git.renamelimit',
273 coreconfigitem('convert', 'git.renamelimit',
274 default=400,
274 default=400,
275 )
275 )
276 coreconfigitem('convert', 'git.saverev',
276 coreconfigitem('convert', 'git.saverev',
277 default=True,
277 default=True,
278 )
278 )
279 coreconfigitem('convert', 'git.similarity',
279 coreconfigitem('convert', 'git.similarity',
280 default=50,
280 default=50,
281 )
281 )
282 coreconfigitem('convert', 'git.skipsubmodules',
282 coreconfigitem('convert', 'git.skipsubmodules',
283 default=False,
283 default=False,
284 )
284 )
285 coreconfigitem('convert', 'hg.clonebranches',
285 coreconfigitem('convert', 'hg.clonebranches',
286 default=False,
286 default=False,
287 )
287 )
288 coreconfigitem('convert', 'hg.ignoreerrors',
288 coreconfigitem('convert', 'hg.ignoreerrors',
289 default=False,
289 default=False,
290 )
290 )
291 coreconfigitem('convert', 'hg.revs',
291 coreconfigitem('convert', 'hg.revs',
292 default=None,
292 default=None,
293 )
293 )
294 coreconfigitem('convert', 'hg.saverev',
294 coreconfigitem('convert', 'hg.saverev',
295 default=False,
295 default=False,
296 )
296 )
297 coreconfigitem('convert', 'hg.sourcename',
297 coreconfigitem('convert', 'hg.sourcename',
298 default=None,
298 default=None,
299 )
299 )
300 coreconfigitem('convert', 'hg.startrev',
300 coreconfigitem('convert', 'hg.startrev',
301 default=None,
301 default=None,
302 )
302 )
303 coreconfigitem('convert', 'hg.tagsbranch',
303 coreconfigitem('convert', 'hg.tagsbranch',
304 default='default',
304 default='default',
305 )
305 )
306 coreconfigitem('convert', 'hg.usebranchnames',
306 coreconfigitem('convert', 'hg.usebranchnames',
307 default=True,
307 default=True,
308 )
308 )
309 coreconfigitem('convert', 'ignoreancestorcheck',
309 coreconfigitem('convert', 'ignoreancestorcheck',
310 default=False,
310 default=False,
311 )
311 )
312 coreconfigitem('convert', 'localtimezone',
312 coreconfigitem('convert', 'localtimezone',
313 default=False,
313 default=False,
314 )
314 )
315 coreconfigitem('convert', 'p4.encoding',
315 coreconfigitem('convert', 'p4.encoding',
316 default=dynamicdefault,
316 default=dynamicdefault,
317 )
317 )
318 coreconfigitem('convert', 'p4.startrev',
318 coreconfigitem('convert', 'p4.startrev',
319 default=0,
319 default=0,
320 )
320 )
321 coreconfigitem('convert', 'skiptags',
321 coreconfigitem('convert', 'skiptags',
322 default=False,
322 default=False,
323 )
323 )
324 coreconfigitem('convert', 'svn.debugsvnlog',
324 coreconfigitem('convert', 'svn.debugsvnlog',
325 default=True,
325 default=True,
326 )
326 )
327 coreconfigitem('convert', 'svn.trunk',
327 coreconfigitem('convert', 'svn.trunk',
328 default=None,
328 default=None,
329 )
329 )
330 coreconfigitem('convert', 'svn.tags',
330 coreconfigitem('convert', 'svn.tags',
331 default=None,
331 default=None,
332 )
332 )
333 coreconfigitem('convert', 'svn.branches',
333 coreconfigitem('convert', 'svn.branches',
334 default=None,
334 default=None,
335 )
335 )
336 coreconfigitem('convert', 'svn.startrev',
336 coreconfigitem('convert', 'svn.startrev',
337 default=0,
337 default=0,
338 )
338 )
339 coreconfigitem('debug', 'dirstate.delaywrite',
339 coreconfigitem('debug', 'dirstate.delaywrite',
340 default=0,
340 default=0,
341 )
341 )
342 coreconfigitem('defaults', '.*',
342 coreconfigitem('defaults', '.*',
343 default=None,
343 default=None,
344 generic=True,
344 generic=True,
345 )
345 )
346 coreconfigitem('devel', 'all-warnings',
346 coreconfigitem('devel', 'all-warnings',
347 default=False,
347 default=False,
348 )
348 )
349 coreconfigitem('devel', 'bundle2.debug',
349 coreconfigitem('devel', 'bundle2.debug',
350 default=False,
350 default=False,
351 )
351 )
352 coreconfigitem('devel', 'bundle.delta',
352 coreconfigitem('devel', 'bundle.delta',
353 default='',
353 default='',
354 )
354 )
355 coreconfigitem('devel', 'cache-vfs',
355 coreconfigitem('devel', 'cache-vfs',
356 default=None,
356 default=None,
357 )
357 )
358 coreconfigitem('devel', 'check-locks',
358 coreconfigitem('devel', 'check-locks',
359 default=False,
359 default=False,
360 )
360 )
361 coreconfigitem('devel', 'check-relroot',
361 coreconfigitem('devel', 'check-relroot',
362 default=False,
362 default=False,
363 )
363 )
364 coreconfigitem('devel', 'default-date',
364 coreconfigitem('devel', 'default-date',
365 default=None,
365 default=None,
366 )
366 )
367 coreconfigitem('devel', 'deprec-warn',
367 coreconfigitem('devel', 'deprec-warn',
368 default=False,
368 default=False,
369 )
369 )
370 coreconfigitem('devel', 'disableloaddefaultcerts',
370 coreconfigitem('devel', 'disableloaddefaultcerts',
371 default=False,
371 default=False,
372 )
372 )
373 coreconfigitem('devel', 'warn-empty-changegroup',
373 coreconfigitem('devel', 'warn-empty-changegroup',
374 default=False,
374 default=False,
375 )
375 )
376 coreconfigitem('devel', 'legacy.exchange',
376 coreconfigitem('devel', 'legacy.exchange',
377 default=list,
377 default=list,
378 )
378 )
379 coreconfigitem('devel', 'servercafile',
379 coreconfigitem('devel', 'servercafile',
380 default='',
380 default='',
381 )
381 )
382 coreconfigitem('devel', 'serverexactprotocol',
382 coreconfigitem('devel', 'serverexactprotocol',
383 default='',
383 default='',
384 )
384 )
385 coreconfigitem('devel', 'serverrequirecert',
385 coreconfigitem('devel', 'serverrequirecert',
386 default=False,
386 default=False,
387 )
387 )
388 coreconfigitem('devel', 'strip-obsmarkers',
388 coreconfigitem('devel', 'strip-obsmarkers',
389 default=True,
389 default=True,
390 )
390 )
391 coreconfigitem('devel', 'warn-config',
391 coreconfigitem('devel', 'warn-config',
392 default=None,
392 default=None,
393 )
393 )
394 coreconfigitem('devel', 'warn-config-default',
394 coreconfigitem('devel', 'warn-config-default',
395 default=None,
395 default=None,
396 )
396 )
397 coreconfigitem('devel', 'user.obsmarker',
397 coreconfigitem('devel', 'user.obsmarker',
398 default=None,
398 default=None,
399 )
399 )
400 coreconfigitem('devel', 'warn-config-unknown',
400 coreconfigitem('devel', 'warn-config-unknown',
401 default=None,
401 default=None,
402 )
402 )
403 coreconfigitem('devel', 'debug.copies',
403 coreconfigitem('devel', 'debug.copies',
404 default=False,
404 default=False,
405 )
405 )
406 coreconfigitem('devel', 'debug.extensions',
406 coreconfigitem('devel', 'debug.extensions',
407 default=False,
407 default=False,
408 )
408 )
409 coreconfigitem('devel', 'debug.peer-request',
409 coreconfigitem('devel', 'debug.peer-request',
410 default=False,
410 default=False,
411 )
411 )
412 _registerdiffopts(section='diff')
412 _registerdiffopts(section='diff')
413 coreconfigitem('email', 'bcc',
413 coreconfigitem('email', 'bcc',
414 default=None,
414 default=None,
415 )
415 )
416 coreconfigitem('email', 'cc',
416 coreconfigitem('email', 'cc',
417 default=None,
417 default=None,
418 )
418 )
419 coreconfigitem('email', 'charsets',
419 coreconfigitem('email', 'charsets',
420 default=list,
420 default=list,
421 )
421 )
422 coreconfigitem('email', 'from',
422 coreconfigitem('email', 'from',
423 default=None,
423 default=None,
424 )
424 )
425 coreconfigitem('email', 'method',
425 coreconfigitem('email', 'method',
426 default='smtp',
426 default='smtp',
427 )
427 )
428 coreconfigitem('email', 'reply-to',
428 coreconfigitem('email', 'reply-to',
429 default=None,
429 default=None,
430 )
430 )
431 coreconfigitem('email', 'to',
431 coreconfigitem('email', 'to',
432 default=None,
432 default=None,
433 )
433 )
434 coreconfigitem('experimental', 'archivemetatemplate',
434 coreconfigitem('experimental', 'archivemetatemplate',
435 default=dynamicdefault,
435 default=dynamicdefault,
436 )
436 )
437 coreconfigitem('experimental', 'auto-publish',
437 coreconfigitem('experimental', 'auto-publish',
438 default='publish',
438 default='publish',
439 )
439 )
440 coreconfigitem('experimental', 'bundle-phases',
440 coreconfigitem('experimental', 'bundle-phases',
441 default=False,
441 default=False,
442 )
442 )
443 coreconfigitem('experimental', 'bundle2-advertise',
443 coreconfigitem('experimental', 'bundle2-advertise',
444 default=True,
444 default=True,
445 )
445 )
446 coreconfigitem('experimental', 'bundle2-output-capture',
446 coreconfigitem('experimental', 'bundle2-output-capture',
447 default=False,
447 default=False,
448 )
448 )
449 coreconfigitem('experimental', 'bundle2.pushback',
449 coreconfigitem('experimental', 'bundle2.pushback',
450 default=False,
450 default=False,
451 )
451 )
452 coreconfigitem('experimental', 'bundle2lazylocking',
452 coreconfigitem('experimental', 'bundle2lazylocking',
453 default=False,
453 default=False,
454 )
454 )
455 coreconfigitem('experimental', 'bundlecomplevel',
455 coreconfigitem('experimental', 'bundlecomplevel',
456 default=None,
456 default=None,
457 )
457 )
458 coreconfigitem('experimental', 'bundlecomplevel.bzip2',
458 coreconfigitem('experimental', 'bundlecomplevel.bzip2',
459 default=None,
459 default=None,
460 )
460 )
461 coreconfigitem('experimental', 'bundlecomplevel.gzip',
461 coreconfigitem('experimental', 'bundlecomplevel.gzip',
462 default=None,
462 default=None,
463 )
463 )
464 coreconfigitem('experimental', 'bundlecomplevel.none',
464 coreconfigitem('experimental', 'bundlecomplevel.none',
465 default=None,
465 default=None,
466 )
466 )
467 coreconfigitem('experimental', 'bundlecomplevel.zstd',
467 coreconfigitem('experimental', 'bundlecomplevel.zstd',
468 default=None,
468 default=None,
469 )
469 )
470 coreconfigitem('experimental', 'changegroup3',
470 coreconfigitem('experimental', 'changegroup3',
471 default=False,
471 default=False,
472 )
472 )
473 coreconfigitem('experimental', 'cleanup-as-archived',
473 coreconfigitem('experimental', 'cleanup-as-archived',
474 default=False,
474 default=False,
475 )
475 )
476 coreconfigitem('experimental', 'clientcompressionengines',
476 coreconfigitem('experimental', 'clientcompressionengines',
477 default=list,
477 default=list,
478 )
478 )
479 coreconfigitem('experimental', 'copytrace',
479 coreconfigitem('experimental', 'copytrace',
480 default='on',
480 default='on',
481 )
481 )
482 coreconfigitem('experimental', 'copytrace.movecandidateslimit',
482 coreconfigitem('experimental', 'copytrace.movecandidateslimit',
483 default=100,
483 default=100,
484 )
484 )
485 coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
485 coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
486 default=100,
486 default=100,
487 )
487 )
488 coreconfigitem('experimental', 'copies.read-from',
488 coreconfigitem('experimental', 'copies.read-from',
489 default="filelog-only",
489 default="filelog-only",
490 )
490 )
491 coreconfigitem('experimental', 'crecordtest',
491 coreconfigitem('experimental', 'crecordtest',
492 default=None,
492 default=None,
493 )
493 )
494 coreconfigitem('experimental', 'directaccess',
494 coreconfigitem('experimental', 'directaccess',
495 default=False,
495 default=False,
496 )
496 )
497 coreconfigitem('experimental', 'directaccess.revnums',
497 coreconfigitem('experimental', 'directaccess.revnums',
498 default=False,
498 default=False,
499 )
499 )
500 coreconfigitem('experimental', 'editortmpinhg',
500 coreconfigitem('experimental', 'editortmpinhg',
501 default=False,
501 default=False,
502 )
502 )
503 coreconfigitem('experimental', 'evolution',
503 coreconfigitem('experimental', 'evolution',
504 default=list,
504 default=list,
505 )
505 )
506 coreconfigitem('experimental', 'evolution.allowdivergence',
506 coreconfigitem('experimental', 'evolution.allowdivergence',
507 default=False,
507 default=False,
508 alias=[('experimental', 'allowdivergence')]
508 alias=[('experimental', 'allowdivergence')]
509 )
509 )
510 coreconfigitem('experimental', 'evolution.allowunstable',
510 coreconfigitem('experimental', 'evolution.allowunstable',
511 default=None,
511 default=None,
512 )
512 )
513 coreconfigitem('experimental', 'evolution.createmarkers',
513 coreconfigitem('experimental', 'evolution.createmarkers',
514 default=None,
514 default=None,
515 )
515 )
516 coreconfigitem('experimental', 'evolution.effect-flags',
516 coreconfigitem('experimental', 'evolution.effect-flags',
517 default=True,
517 default=True,
518 alias=[('experimental', 'effect-flags')]
518 alias=[('experimental', 'effect-flags')]
519 )
519 )
520 coreconfigitem('experimental', 'evolution.exchange',
520 coreconfigitem('experimental', 'evolution.exchange',
521 default=None,
521 default=None,
522 )
522 )
523 coreconfigitem('experimental', 'evolution.bundle-obsmarker',
523 coreconfigitem('experimental', 'evolution.bundle-obsmarker',
524 default=False,
524 default=False,
525 )
525 )
526 coreconfigitem('experimental', 'evolution.report-instabilities',
526 coreconfigitem('experimental', 'evolution.report-instabilities',
527 default=True,
527 default=True,
528 )
528 )
529 coreconfigitem('experimental', 'evolution.track-operation',
529 coreconfigitem('experimental', 'evolution.track-operation',
530 default=True,
530 default=True,
531 )
531 )
532 coreconfigitem('experimental', 'maxdeltachainspan',
532 coreconfigitem('experimental', 'maxdeltachainspan',
533 default=-1,
533 default=-1,
534 )
534 )
535 coreconfigitem('experimental', 'mergetempdirprefix',
535 coreconfigitem('experimental', 'mergetempdirprefix',
536 default=None,
536 default=None,
537 )
537 )
538 coreconfigitem('experimental', 'mmapindexthreshold',
538 coreconfigitem('experimental', 'mmapindexthreshold',
539 default=None,
539 default=None,
540 )
540 )
541 coreconfigitem('experimental', 'narrow',
541 coreconfigitem('experimental', 'narrow',
542 default=False,
542 default=False,
543 )
543 )
544 coreconfigitem('experimental', 'nonnormalparanoidcheck',
544 coreconfigitem('experimental', 'nonnormalparanoidcheck',
545 default=False,
545 default=False,
546 )
546 )
547 coreconfigitem('experimental', 'exportableenviron',
547 coreconfigitem('experimental', 'exportableenviron',
548 default=list,
548 default=list,
549 )
549 )
550 coreconfigitem('experimental', 'extendedheader.index',
550 coreconfigitem('experimental', 'extendedheader.index',
551 default=None,
551 default=None,
552 )
552 )
553 coreconfigitem('experimental', 'extendedheader.similarity',
553 coreconfigitem('experimental', 'extendedheader.similarity',
554 default=False,
554 default=False,
555 )
555 )
556 coreconfigitem('experimental', 'format.compression',
556 coreconfigitem('experimental', 'format.compression',
557 default='zlib',
557 default='zlib',
558 )
558 )
559 coreconfigitem('experimental', 'graphshorten',
559 coreconfigitem('experimental', 'graphshorten',
560 default=False,
560 default=False,
561 )
561 )
562 coreconfigitem('experimental', 'graphstyle.parent',
562 coreconfigitem('experimental', 'graphstyle.parent',
563 default=dynamicdefault,
563 default=dynamicdefault,
564 )
564 )
565 coreconfigitem('experimental', 'graphstyle.missing',
565 coreconfigitem('experimental', 'graphstyle.missing',
566 default=dynamicdefault,
566 default=dynamicdefault,
567 )
567 )
568 coreconfigitem('experimental', 'graphstyle.grandparent',
568 coreconfigitem('experimental', 'graphstyle.grandparent',
569 default=dynamicdefault,
569 default=dynamicdefault,
570 )
570 )
571 coreconfigitem('experimental', 'hook-track-tags',
571 coreconfigitem('experimental', 'hook-track-tags',
572 default=False,
572 default=False,
573 )
573 )
574 coreconfigitem('experimental', 'httppeer.advertise-v2',
574 coreconfigitem('experimental', 'httppeer.advertise-v2',
575 default=False,
575 default=False,
576 )
576 )
577 coreconfigitem('experimental', 'httppeer.v2-encoder-order',
577 coreconfigitem('experimental', 'httppeer.v2-encoder-order',
578 default=None,
578 default=None,
579 )
579 )
580 coreconfigitem('experimental', 'httppostargs',
580 coreconfigitem('experimental', 'httppostargs',
581 default=False,
581 default=False,
582 )
582 )
583 coreconfigitem('experimental', 'mergedriver',
583 coreconfigitem('experimental', 'mergedriver',
584 default=None,
584 default=None,
585 )
585 )
586 coreconfigitem('experimental', 'nointerrupt', default=False)
586 coreconfigitem('experimental', 'nointerrupt', default=False)
587 coreconfigitem('experimental', 'nointerrupt-interactiveonly', default=True)
587 coreconfigitem('experimental', 'nointerrupt-interactiveonly', default=True)
588
588
589 coreconfigitem('experimental', 'obsmarkers-exchange-debug',
589 coreconfigitem('experimental', 'obsmarkers-exchange-debug',
590 default=False,
590 default=False,
591 )
591 )
592 coreconfigitem('experimental', 'remotenames',
592 coreconfigitem('experimental', 'remotenames',
593 default=False,
593 default=False,
594 )
594 )
595 coreconfigitem('experimental', 'removeemptydirs',
595 coreconfigitem('experimental', 'removeemptydirs',
596 default=True,
596 default=True,
597 )
597 )
598 coreconfigitem('experimental', 'revisions.prefixhexnode',
598 coreconfigitem('experimental', 'revisions.prefixhexnode',
599 default=False,
599 default=False,
600 )
600 )
601 coreconfigitem('experimental', 'revlogv2',
601 coreconfigitem('experimental', 'revlogv2',
602 default=None,
602 default=None,
603 )
603 )
604 coreconfigitem('experimental', 'revisions.disambiguatewithin',
604 coreconfigitem('experimental', 'revisions.disambiguatewithin',
605 default=None,
605 default=None,
606 )
606 )
607 coreconfigitem('experimental', 'server.filesdata.recommended-batch-size',
607 coreconfigitem('experimental', 'server.filesdata.recommended-batch-size',
608 default=50000,
608 default=50000,
609 )
609 )
610 coreconfigitem('experimental', 'server.manifestdata.recommended-batch-size',
610 coreconfigitem('experimental', 'server.manifestdata.recommended-batch-size',
611 default=100000,
611 default=100000,
612 )
612 )
613 coreconfigitem('experimental', 'server.stream-narrow-clones',
613 coreconfigitem('experimental', 'server.stream-narrow-clones',
614 default=False,
614 default=False,
615 )
615 )
616 coreconfigitem('experimental', 'single-head-per-branch',
616 coreconfigitem('experimental', 'single-head-per-branch',
617 default=False,
617 default=False,
618 )
618 )
619 coreconfigitem('experimental', 'sshserver.support-v2',
619 coreconfigitem('experimental', 'sshserver.support-v2',
620 default=False,
620 default=False,
621 )
621 )
622 coreconfigitem('experimental', 'sparse-read',
622 coreconfigitem('experimental', 'sparse-read',
623 default=False,
623 default=False,
624 )
624 )
625 coreconfigitem('experimental', 'sparse-read.density-threshold',
625 coreconfigitem('experimental', 'sparse-read.density-threshold',
626 default=0.50,
626 default=0.50,
627 )
627 )
628 coreconfigitem('experimental', 'sparse-read.min-gap-size',
628 coreconfigitem('experimental', 'sparse-read.min-gap-size',
629 default='65K',
629 default='65K',
630 )
630 )
631 coreconfigitem('experimental', 'treemanifest',
631 coreconfigitem('experimental', 'treemanifest',
632 default=False,
632 default=False,
633 )
633 )
634 coreconfigitem('experimental', 'update.atomic-file',
634 coreconfigitem('experimental', 'update.atomic-file',
635 default=False,
635 default=False,
636 )
636 )
637 coreconfigitem('experimental', 'sshpeer.advertise-v2',
637 coreconfigitem('experimental', 'sshpeer.advertise-v2',
638 default=False,
638 default=False,
639 )
639 )
640 coreconfigitem('experimental', 'web.apiserver',
640 coreconfigitem('experimental', 'web.apiserver',
641 default=False,
641 default=False,
642 )
642 )
643 coreconfigitem('experimental', 'web.api.http-v2',
643 coreconfigitem('experimental', 'web.api.http-v2',
644 default=False,
644 default=False,
645 )
645 )
646 coreconfigitem('experimental', 'web.api.debugreflect',
646 coreconfigitem('experimental', 'web.api.debugreflect',
647 default=False,
647 default=False,
648 )
648 )
649 coreconfigitem('experimental', 'worker.wdir-get-thread-safe',
649 coreconfigitem('experimental', 'worker.wdir-get-thread-safe',
650 default=False,
650 default=False,
651 )
651 )
652 coreconfigitem('experimental', 'xdiff',
652 coreconfigitem('experimental', 'xdiff',
653 default=False,
653 default=False,
654 )
654 )
655 coreconfigitem('extensions', '.*',
655 coreconfigitem('extensions', '.*',
656 default=None,
656 default=None,
657 generic=True,
657 generic=True,
658 )
658 )
659 coreconfigitem('extdata', '.*',
659 coreconfigitem('extdata', '.*',
660 default=None,
660 default=None,
661 generic=True,
661 generic=True,
662 )
662 )
663 coreconfigitem('format', 'chunkcachesize',
663 coreconfigitem('format', 'chunkcachesize',
664 default=None,
664 default=None,
665 )
665 )
666 coreconfigitem('format', 'dotencode',
666 coreconfigitem('format', 'dotencode',
667 default=True,
667 default=True,
668 )
668 )
669 coreconfigitem('format', 'generaldelta',
669 coreconfigitem('format', 'generaldelta',
670 default=False,
670 default=False,
671 )
671 )
672 coreconfigitem('format', 'manifestcachesize',
672 coreconfigitem('format', 'manifestcachesize',
673 default=None,
673 default=None,
674 )
674 )
675 coreconfigitem('format', 'maxchainlen',
675 coreconfigitem('format', 'maxchainlen',
676 default=dynamicdefault,
676 default=dynamicdefault,
677 )
677 )
678 coreconfigitem('format', 'obsstore-version',
678 coreconfigitem('format', 'obsstore-version',
679 default=None,
679 default=None,
680 )
680 )
681 coreconfigitem('format', 'sparse-revlog',
681 coreconfigitem('format', 'sparse-revlog',
682 default=True,
682 default=True,
683 )
683 )
684 coreconfigitem('format', 'usefncache',
684 coreconfigitem('format', 'usefncache',
685 default=True,
685 default=True,
686 )
686 )
687 coreconfigitem('format', 'usegeneraldelta',
687 coreconfigitem('format', 'usegeneraldelta',
688 default=True,
688 default=True,
689 )
689 )
690 coreconfigitem('format', 'usestore',
690 coreconfigitem('format', 'usestore',
691 default=True,
691 default=True,
692 )
692 )
693 coreconfigitem('format', 'internal-phase',
693 coreconfigitem('format', 'internal-phase',
694 default=False,
694 default=False,
695 )
695 )
696 coreconfigitem('fsmonitor', 'warn_when_unused',
696 coreconfigitem('fsmonitor', 'warn_when_unused',
697 default=True,
697 default=True,
698 )
698 )
699 coreconfigitem('fsmonitor', 'warn_update_file_count',
699 coreconfigitem('fsmonitor', 'warn_update_file_count',
700 default=50000,
700 default=50000,
701 )
701 )
702 coreconfigitem('help', br'hidden-command\..*',
702 coreconfigitem('help', br'hidden-command\..*',
703 default=False,
703 default=False,
704 generic=True,
704 generic=True,
705 )
705 )
706 coreconfigitem('help', br'hidden-topic\..*',
706 coreconfigitem('help', br'hidden-topic\..*',
707 default=False,
707 default=False,
708 generic=True,
708 generic=True,
709 )
709 )
710 coreconfigitem('hooks', '.*',
710 coreconfigitem('hooks', '.*',
711 default=dynamicdefault,
711 default=dynamicdefault,
712 generic=True,
712 generic=True,
713 )
713 )
714 coreconfigitem('hgweb-paths', '.*',
714 coreconfigitem('hgweb-paths', '.*',
715 default=list,
715 default=list,
716 generic=True,
716 generic=True,
717 )
717 )
718 coreconfigitem('hostfingerprints', '.*',
718 coreconfigitem('hostfingerprints', '.*',
719 default=list,
719 default=list,
720 generic=True,
720 generic=True,
721 )
721 )
722 coreconfigitem('hostsecurity', 'ciphers',
722 coreconfigitem('hostsecurity', 'ciphers',
723 default=None,
723 default=None,
724 )
724 )
725 coreconfigitem('hostsecurity', 'disabletls10warning',
725 coreconfigitem('hostsecurity', 'disabletls10warning',
726 default=False,
726 default=False,
727 )
727 )
728 coreconfigitem('hostsecurity', 'minimumprotocol',
728 coreconfigitem('hostsecurity', 'minimumprotocol',
729 default=dynamicdefault,
729 default=dynamicdefault,
730 )
730 )
731 coreconfigitem('hostsecurity', '.*:minimumprotocol$',
731 coreconfigitem('hostsecurity', '.*:minimumprotocol$',
732 default=dynamicdefault,
732 default=dynamicdefault,
733 generic=True,
733 generic=True,
734 )
734 )
735 coreconfigitem('hostsecurity', '.*:ciphers$',
735 coreconfigitem('hostsecurity', '.*:ciphers$',
736 default=dynamicdefault,
736 default=dynamicdefault,
737 generic=True,
737 generic=True,
738 )
738 )
739 coreconfigitem('hostsecurity', '.*:fingerprints$',
739 coreconfigitem('hostsecurity', '.*:fingerprints$',
740 default=list,
740 default=list,
741 generic=True,
741 generic=True,
742 )
742 )
743 coreconfigitem('hostsecurity', '.*:verifycertsfile$',
743 coreconfigitem('hostsecurity', '.*:verifycertsfile$',
744 default=None,
744 default=None,
745 generic=True,
745 generic=True,
746 )
746 )
747
747
748 coreconfigitem('http_proxy', 'always',
748 coreconfigitem('http_proxy', 'always',
749 default=False,
749 default=False,
750 )
750 )
751 coreconfigitem('http_proxy', 'host',
751 coreconfigitem('http_proxy', 'host',
752 default=None,
752 default=None,
753 )
753 )
754 coreconfigitem('http_proxy', 'no',
754 coreconfigitem('http_proxy', 'no',
755 default=list,
755 default=list,
756 )
756 )
757 coreconfigitem('http_proxy', 'passwd',
757 coreconfigitem('http_proxy', 'passwd',
758 default=None,
758 default=None,
759 )
759 )
760 coreconfigitem('http_proxy', 'user',
760 coreconfigitem('http_proxy', 'user',
761 default=None,
761 default=None,
762 )
762 )
763
763
764 coreconfigitem('http', 'timeout',
764 coreconfigitem('http', 'timeout',
765 default=None,
765 default=None,
766 )
766 )
767
767
768 coreconfigitem('logtoprocess', 'commandexception',
768 coreconfigitem('logtoprocess', 'commandexception',
769 default=None,
769 default=None,
770 )
770 )
771 coreconfigitem('logtoprocess', 'commandfinish',
771 coreconfigitem('logtoprocess', 'commandfinish',
772 default=None,
772 default=None,
773 )
773 )
774 coreconfigitem('logtoprocess', 'command',
774 coreconfigitem('logtoprocess', 'command',
775 default=None,
775 default=None,
776 )
776 )
777 coreconfigitem('logtoprocess', 'develwarn',
777 coreconfigitem('logtoprocess', 'develwarn',
778 default=None,
778 default=None,
779 )
779 )
780 coreconfigitem('logtoprocess', 'uiblocked',
780 coreconfigitem('logtoprocess', 'uiblocked',
781 default=None,
781 default=None,
782 )
782 )
783 coreconfigitem('merge', 'checkunknown',
783 coreconfigitem('merge', 'checkunknown',
784 default='abort',
784 default='abort',
785 )
785 )
786 coreconfigitem('merge', 'checkignored',
786 coreconfigitem('merge', 'checkignored',
787 default='abort',
787 default='abort',
788 )
788 )
789 coreconfigitem('experimental', 'merge.checkpathconflicts',
789 coreconfigitem('experimental', 'merge.checkpathconflicts',
790 default=False,
790 default=False,
791 )
791 )
792 coreconfigitem('merge', 'followcopies',
792 coreconfigitem('merge', 'followcopies',
793 default=True,
793 default=True,
794 )
794 )
795 coreconfigitem('merge', 'on-failure',
795 coreconfigitem('merge', 'on-failure',
796 default='continue',
796 default='continue',
797 )
797 )
798 coreconfigitem('merge', 'preferancestor',
798 coreconfigitem('merge', 'preferancestor',
799 default=lambda: ['*'],
799 default=lambda: ['*'],
800 )
800 )
801 coreconfigitem('merge', 'strict-capability-check',
801 coreconfigitem('merge', 'strict-capability-check',
802 default=False,
802 default=False,
803 )
803 )
804 coreconfigitem('merge-tools', '.*',
804 coreconfigitem('merge-tools', '.*',
805 default=None,
805 default=None,
806 generic=True,
806 generic=True,
807 )
807 )
808 coreconfigitem('merge-tools', br'.*\.args$',
808 coreconfigitem('merge-tools', br'.*\.args$',
809 default="$local $base $other",
809 default="$local $base $other",
810 generic=True,
810 generic=True,
811 priority=-1,
811 priority=-1,
812 )
812 )
813 coreconfigitem('merge-tools', br'.*\.binary$',
813 coreconfigitem('merge-tools', br'.*\.binary$',
814 default=False,
814 default=False,
815 generic=True,
815 generic=True,
816 priority=-1,
816 priority=-1,
817 )
817 )
818 coreconfigitem('merge-tools', br'.*\.check$',
818 coreconfigitem('merge-tools', br'.*\.check$',
819 default=list,
819 default=list,
820 generic=True,
820 generic=True,
821 priority=-1,
821 priority=-1,
822 )
822 )
823 coreconfigitem('merge-tools', br'.*\.checkchanged$',
823 coreconfigitem('merge-tools', br'.*\.checkchanged$',
824 default=False,
824 default=False,
825 generic=True,
825 generic=True,
826 priority=-1,
826 priority=-1,
827 )
827 )
828 coreconfigitem('merge-tools', br'.*\.executable$',
828 coreconfigitem('merge-tools', br'.*\.executable$',
829 default=dynamicdefault,
829 default=dynamicdefault,
830 generic=True,
830 generic=True,
831 priority=-1,
831 priority=-1,
832 )
832 )
833 coreconfigitem('merge-tools', br'.*\.fixeol$',
833 coreconfigitem('merge-tools', br'.*\.fixeol$',
834 default=False,
834 default=False,
835 generic=True,
835 generic=True,
836 priority=-1,
836 priority=-1,
837 )
837 )
838 coreconfigitem('merge-tools', br'.*\.gui$',
838 coreconfigitem('merge-tools', br'.*\.gui$',
839 default=False,
839 default=False,
840 generic=True,
840 generic=True,
841 priority=-1,
841 priority=-1,
842 )
842 )
843 coreconfigitem('merge-tools', br'.*\.mergemarkers$',
843 coreconfigitem('merge-tools', br'.*\.mergemarkers$',
844 default='basic',
844 default='basic',
845 generic=True,
845 generic=True,
846 priority=-1,
846 priority=-1,
847 )
847 )
848 coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
848 coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
849 default=dynamicdefault, # take from ui.mergemarkertemplate
849 default=dynamicdefault, # take from ui.mergemarkertemplate
850 generic=True,
850 generic=True,
851 priority=-1,
851 priority=-1,
852 )
852 )
853 coreconfigitem('merge-tools', br'.*\.priority$',
853 coreconfigitem('merge-tools', br'.*\.priority$',
854 default=0,
854 default=0,
855 generic=True,
855 generic=True,
856 priority=-1,
856 priority=-1,
857 )
857 )
858 coreconfigitem('merge-tools', br'.*\.premerge$',
858 coreconfigitem('merge-tools', br'.*\.premerge$',
859 default=dynamicdefault,
859 default=dynamicdefault,
860 generic=True,
860 generic=True,
861 priority=-1,
861 priority=-1,
862 )
862 )
863 coreconfigitem('merge-tools', br'.*\.symlink$',
863 coreconfigitem('merge-tools', br'.*\.symlink$',
864 default=False,
864 default=False,
865 generic=True,
865 generic=True,
866 priority=-1,
866 priority=-1,
867 )
867 )
868 coreconfigitem('pager', 'attend-.*',
868 coreconfigitem('pager', 'attend-.*',
869 default=dynamicdefault,
869 default=dynamicdefault,
870 generic=True,
870 generic=True,
871 )
871 )
872 coreconfigitem('pager', 'ignore',
872 coreconfigitem('pager', 'ignore',
873 default=list,
873 default=list,
874 )
874 )
875 coreconfigitem('pager', 'pager',
875 coreconfigitem('pager', 'pager',
876 default=dynamicdefault,
876 default=dynamicdefault,
877 )
877 )
878 coreconfigitem('patch', 'eol',
878 coreconfigitem('patch', 'eol',
879 default='strict',
879 default='strict',
880 )
880 )
881 coreconfigitem('patch', 'fuzz',
881 coreconfigitem('patch', 'fuzz',
882 default=2,
882 default=2,
883 )
883 )
884 coreconfigitem('paths', 'default',
884 coreconfigitem('paths', 'default',
885 default=None,
885 default=None,
886 )
886 )
887 coreconfigitem('paths', 'default-push',
887 coreconfigitem('paths', 'default-push',
888 default=None,
888 default=None,
889 )
889 )
890 coreconfigitem('paths', '.*',
890 coreconfigitem('paths', '.*',
891 default=None,
891 default=None,
892 generic=True,
892 generic=True,
893 )
893 )
894 coreconfigitem('phases', 'checksubrepos',
894 coreconfigitem('phases', 'checksubrepos',
895 default='follow',
895 default='follow',
896 )
896 )
897 coreconfigitem('phases', 'new-commit',
897 coreconfigitem('phases', 'new-commit',
898 default='draft',
898 default='draft',
899 )
899 )
900 coreconfigitem('phases', 'publish',
900 coreconfigitem('phases', 'publish',
901 default=True,
901 default=True,
902 )
902 )
903 coreconfigitem('profiling', 'enabled',
903 coreconfigitem('profiling', 'enabled',
904 default=False,
904 default=False,
905 )
905 )
906 coreconfigitem('profiling', 'format',
906 coreconfigitem('profiling', 'format',
907 default='text',
907 default='text',
908 )
908 )
909 coreconfigitem('profiling', 'freq',
909 coreconfigitem('profiling', 'freq',
910 default=1000,
910 default=1000,
911 )
911 )
912 coreconfigitem('profiling', 'limit',
912 coreconfigitem('profiling', 'limit',
913 default=30,
913 default=30,
914 )
914 )
915 coreconfigitem('profiling', 'nested',
915 coreconfigitem('profiling', 'nested',
916 default=0,
916 default=0,
917 )
917 )
918 coreconfigitem('profiling', 'output',
918 coreconfigitem('profiling', 'output',
919 default=None,
919 default=None,
920 )
920 )
921 coreconfigitem('profiling', 'showmax',
921 coreconfigitem('profiling', 'showmax',
922 default=0.999,
922 default=0.999,
923 )
923 )
924 coreconfigitem('profiling', 'showmin',
924 coreconfigitem('profiling', 'showmin',
925 default=dynamicdefault,
925 default=dynamicdefault,
926 )
926 )
927 coreconfigitem('profiling', 'sort',
927 coreconfigitem('profiling', 'sort',
928 default='inlinetime',
928 default='inlinetime',
929 )
929 )
930 coreconfigitem('profiling', 'statformat',
930 coreconfigitem('profiling', 'statformat',
931 default='hotpath',
931 default='hotpath',
932 )
932 )
933 coreconfigitem('profiling', 'time-track',
933 coreconfigitem('profiling', 'time-track',
934 default=dynamicdefault,
934 default=dynamicdefault,
935 )
935 )
936 coreconfigitem('profiling', 'type',
936 coreconfigitem('profiling', 'type',
937 default='stat',
937 default='stat',
938 )
938 )
939 coreconfigitem('progress', 'assume-tty',
939 coreconfigitem('progress', 'assume-tty',
940 default=False,
940 default=False,
941 )
941 )
942 coreconfigitem('progress', 'changedelay',
942 coreconfigitem('progress', 'changedelay',
943 default=1,
943 default=1,
944 )
944 )
945 coreconfigitem('progress', 'clear-complete',
945 coreconfigitem('progress', 'clear-complete',
946 default=True,
946 default=True,
947 )
947 )
948 coreconfigitem('progress', 'debug',
948 coreconfigitem('progress', 'debug',
949 default=False,
949 default=False,
950 )
950 )
951 coreconfigitem('progress', 'delay',
951 coreconfigitem('progress', 'delay',
952 default=3,
952 default=3,
953 )
953 )
954 coreconfigitem('progress', 'disable',
954 coreconfigitem('progress', 'disable',
955 default=False,
955 default=False,
956 )
956 )
957 coreconfigitem('progress', 'estimateinterval',
957 coreconfigitem('progress', 'estimateinterval',
958 default=60.0,
958 default=60.0,
959 )
959 )
960 coreconfigitem('progress', 'format',
960 coreconfigitem('progress', 'format',
961 default=lambda: ['topic', 'bar', 'number', 'estimate'],
961 default=lambda: ['topic', 'bar', 'number', 'estimate'],
962 )
962 )
963 coreconfigitem('progress', 'refresh',
963 coreconfigitem('progress', 'refresh',
964 default=0.1,
964 default=0.1,
965 )
965 )
966 coreconfigitem('progress', 'width',
966 coreconfigitem('progress', 'width',
967 default=dynamicdefault,
967 default=dynamicdefault,
968 )
968 )
969 coreconfigitem('push', 'pushvars.server',
969 coreconfigitem('push', 'pushvars.server',
970 default=False,
970 default=False,
971 )
971 )
972 coreconfigitem('rewrite', 'backup-bundle',
972 coreconfigitem('rewrite', 'backup-bundle',
973 default=True,
973 default=True,
974 alias=[('ui', 'history-editing-backup')],
974 alias=[('ui', 'history-editing-backup')],
975 )
975 )
976 coreconfigitem('rewrite', 'update-timestamp',
976 coreconfigitem('rewrite', 'update-timestamp',
977 default=False,
977 default=False,
978 )
978 )
979 coreconfigitem('storage', 'new-repo-backend',
979 coreconfigitem('storage', 'new-repo-backend',
980 default='revlogv1',
980 default='revlogv1',
981 )
981 )
982 coreconfigitem('storage', 'revlog.optimize-delta-parent-choice',
982 coreconfigitem('storage', 'revlog.optimize-delta-parent-choice',
983 default=True,
983 default=True,
984 alias=[('format', 'aggressivemergedeltas')],
984 alias=[('format', 'aggressivemergedeltas')],
985 )
985 )
986 coreconfigitem('storage', 'revlog.reuse-external-delta',
987 default=True,
988 )
986 coreconfigitem('storage', 'revlog.reuse-external-delta-parent',
989 coreconfigitem('storage', 'revlog.reuse-external-delta-parent',
987 default=None,
990 default=None,
988 )
991 )
989 coreconfigitem('server', 'bookmarks-pushkey-compat',
992 coreconfigitem('server', 'bookmarks-pushkey-compat',
990 default=True,
993 default=True,
991 )
994 )
992 coreconfigitem('server', 'bundle1',
995 coreconfigitem('server', 'bundle1',
993 default=True,
996 default=True,
994 )
997 )
995 coreconfigitem('server', 'bundle1gd',
998 coreconfigitem('server', 'bundle1gd',
996 default=None,
999 default=None,
997 )
1000 )
998 coreconfigitem('server', 'bundle1.pull',
1001 coreconfigitem('server', 'bundle1.pull',
999 default=None,
1002 default=None,
1000 )
1003 )
1001 coreconfigitem('server', 'bundle1gd.pull',
1004 coreconfigitem('server', 'bundle1gd.pull',
1002 default=None,
1005 default=None,
1003 )
1006 )
1004 coreconfigitem('server', 'bundle1.push',
1007 coreconfigitem('server', 'bundle1.push',
1005 default=None,
1008 default=None,
1006 )
1009 )
1007 coreconfigitem('server', 'bundle1gd.push',
1010 coreconfigitem('server', 'bundle1gd.push',
1008 default=None,
1011 default=None,
1009 )
1012 )
1010 coreconfigitem('server', 'bundle2.stream',
1013 coreconfigitem('server', 'bundle2.stream',
1011 default=True,
1014 default=True,
1012 alias=[('experimental', 'bundle2.stream')]
1015 alias=[('experimental', 'bundle2.stream')]
1013 )
1016 )
1014 coreconfigitem('server', 'compressionengines',
1017 coreconfigitem('server', 'compressionengines',
1015 default=list,
1018 default=list,
1016 )
1019 )
1017 coreconfigitem('server', 'concurrent-push-mode',
1020 coreconfigitem('server', 'concurrent-push-mode',
1018 default='strict',
1021 default='strict',
1019 )
1022 )
1020 coreconfigitem('server', 'disablefullbundle',
1023 coreconfigitem('server', 'disablefullbundle',
1021 default=False,
1024 default=False,
1022 )
1025 )
1023 coreconfigitem('server', 'maxhttpheaderlen',
1026 coreconfigitem('server', 'maxhttpheaderlen',
1024 default=1024,
1027 default=1024,
1025 )
1028 )
1026 coreconfigitem('server', 'pullbundle',
1029 coreconfigitem('server', 'pullbundle',
1027 default=False,
1030 default=False,
1028 )
1031 )
1029 coreconfigitem('server', 'preferuncompressed',
1032 coreconfigitem('server', 'preferuncompressed',
1030 default=False,
1033 default=False,
1031 )
1034 )
1032 coreconfigitem('server', 'streamunbundle',
1035 coreconfigitem('server', 'streamunbundle',
1033 default=False,
1036 default=False,
1034 )
1037 )
1035 coreconfigitem('server', 'uncompressed',
1038 coreconfigitem('server', 'uncompressed',
1036 default=True,
1039 default=True,
1037 )
1040 )
1038 coreconfigitem('server', 'uncompressedallowsecret',
1041 coreconfigitem('server', 'uncompressedallowsecret',
1039 default=False,
1042 default=False,
1040 )
1043 )
1041 coreconfigitem('server', 'validate',
1044 coreconfigitem('server', 'validate',
1042 default=False,
1045 default=False,
1043 )
1046 )
1044 coreconfigitem('server', 'zliblevel',
1047 coreconfigitem('server', 'zliblevel',
1045 default=-1,
1048 default=-1,
1046 )
1049 )
1047 coreconfigitem('server', 'zstdlevel',
1050 coreconfigitem('server', 'zstdlevel',
1048 default=3,
1051 default=3,
1049 )
1052 )
1050 coreconfigitem('share', 'pool',
1053 coreconfigitem('share', 'pool',
1051 default=None,
1054 default=None,
1052 )
1055 )
1053 coreconfigitem('share', 'poolnaming',
1056 coreconfigitem('share', 'poolnaming',
1054 default='identity',
1057 default='identity',
1055 )
1058 )
1056 coreconfigitem('smtp', 'host',
1059 coreconfigitem('smtp', 'host',
1057 default=None,
1060 default=None,
1058 )
1061 )
1059 coreconfigitem('smtp', 'local_hostname',
1062 coreconfigitem('smtp', 'local_hostname',
1060 default=None,
1063 default=None,
1061 )
1064 )
1062 coreconfigitem('smtp', 'password',
1065 coreconfigitem('smtp', 'password',
1063 default=None,
1066 default=None,
1064 )
1067 )
1065 coreconfigitem('smtp', 'port',
1068 coreconfigitem('smtp', 'port',
1066 default=dynamicdefault,
1069 default=dynamicdefault,
1067 )
1070 )
1068 coreconfigitem('smtp', 'tls',
1071 coreconfigitem('smtp', 'tls',
1069 default='none',
1072 default='none',
1070 )
1073 )
1071 coreconfigitem('smtp', 'username',
1074 coreconfigitem('smtp', 'username',
1072 default=None,
1075 default=None,
1073 )
1076 )
1074 coreconfigitem('sparse', 'missingwarning',
1077 coreconfigitem('sparse', 'missingwarning',
1075 default=True,
1078 default=True,
1076 )
1079 )
1077 coreconfigitem('subrepos', 'allowed',
1080 coreconfigitem('subrepos', 'allowed',
1078 default=dynamicdefault, # to make backporting simpler
1081 default=dynamicdefault, # to make backporting simpler
1079 )
1082 )
1080 coreconfigitem('subrepos', 'hg:allowed',
1083 coreconfigitem('subrepos', 'hg:allowed',
1081 default=dynamicdefault,
1084 default=dynamicdefault,
1082 )
1085 )
1083 coreconfigitem('subrepos', 'git:allowed',
1086 coreconfigitem('subrepos', 'git:allowed',
1084 default=dynamicdefault,
1087 default=dynamicdefault,
1085 )
1088 )
1086 coreconfigitem('subrepos', 'svn:allowed',
1089 coreconfigitem('subrepos', 'svn:allowed',
1087 default=dynamicdefault,
1090 default=dynamicdefault,
1088 )
1091 )
1089 coreconfigitem('templates', '.*',
1092 coreconfigitem('templates', '.*',
1090 default=None,
1093 default=None,
1091 generic=True,
1094 generic=True,
1092 )
1095 )
1093 coreconfigitem('templateconfig', '.*',
1096 coreconfigitem('templateconfig', '.*',
1094 default=dynamicdefault,
1097 default=dynamicdefault,
1095 generic=True,
1098 generic=True,
1096 )
1099 )
1097 coreconfigitem('trusted', 'groups',
1100 coreconfigitem('trusted', 'groups',
1098 default=list,
1101 default=list,
1099 )
1102 )
1100 coreconfigitem('trusted', 'users',
1103 coreconfigitem('trusted', 'users',
1101 default=list,
1104 default=list,
1102 )
1105 )
1103 coreconfigitem('ui', '_usedassubrepo',
1106 coreconfigitem('ui', '_usedassubrepo',
1104 default=False,
1107 default=False,
1105 )
1108 )
1106 coreconfigitem('ui', 'allowemptycommit',
1109 coreconfigitem('ui', 'allowemptycommit',
1107 default=False,
1110 default=False,
1108 )
1111 )
1109 coreconfigitem('ui', 'archivemeta',
1112 coreconfigitem('ui', 'archivemeta',
1110 default=True,
1113 default=True,
1111 )
1114 )
1112 coreconfigitem('ui', 'askusername',
1115 coreconfigitem('ui', 'askusername',
1113 default=False,
1116 default=False,
1114 )
1117 )
1115 coreconfigitem('ui', 'clonebundlefallback',
1118 coreconfigitem('ui', 'clonebundlefallback',
1116 default=False,
1119 default=False,
1117 )
1120 )
1118 coreconfigitem('ui', 'clonebundleprefers',
1121 coreconfigitem('ui', 'clonebundleprefers',
1119 default=list,
1122 default=list,
1120 )
1123 )
1121 coreconfigitem('ui', 'clonebundles',
1124 coreconfigitem('ui', 'clonebundles',
1122 default=True,
1125 default=True,
1123 )
1126 )
1124 coreconfigitem('ui', 'color',
1127 coreconfigitem('ui', 'color',
1125 default='auto',
1128 default='auto',
1126 )
1129 )
1127 coreconfigitem('ui', 'commitsubrepos',
1130 coreconfigitem('ui', 'commitsubrepos',
1128 default=False,
1131 default=False,
1129 )
1132 )
1130 coreconfigitem('ui', 'debug',
1133 coreconfigitem('ui', 'debug',
1131 default=False,
1134 default=False,
1132 )
1135 )
1133 coreconfigitem('ui', 'debugger',
1136 coreconfigitem('ui', 'debugger',
1134 default=None,
1137 default=None,
1135 )
1138 )
1136 coreconfigitem('ui', 'editor',
1139 coreconfigitem('ui', 'editor',
1137 default=dynamicdefault,
1140 default=dynamicdefault,
1138 )
1141 )
1139 coreconfigitem('ui', 'fallbackencoding',
1142 coreconfigitem('ui', 'fallbackencoding',
1140 default=None,
1143 default=None,
1141 )
1144 )
1142 coreconfigitem('ui', 'forcecwd',
1145 coreconfigitem('ui', 'forcecwd',
1143 default=None,
1146 default=None,
1144 )
1147 )
1145 coreconfigitem('ui', 'forcemerge',
1148 coreconfigitem('ui', 'forcemerge',
1146 default=None,
1149 default=None,
1147 )
1150 )
1148 coreconfigitem('ui', 'formatdebug',
1151 coreconfigitem('ui', 'formatdebug',
1149 default=False,
1152 default=False,
1150 )
1153 )
1151 coreconfigitem('ui', 'formatjson',
1154 coreconfigitem('ui', 'formatjson',
1152 default=False,
1155 default=False,
1153 )
1156 )
1154 coreconfigitem('ui', 'formatted',
1157 coreconfigitem('ui', 'formatted',
1155 default=None,
1158 default=None,
1156 )
1159 )
1157 coreconfigitem('ui', 'graphnodetemplate',
1160 coreconfigitem('ui', 'graphnodetemplate',
1158 default=None,
1161 default=None,
1159 )
1162 )
1160 coreconfigitem('ui', 'interactive',
1163 coreconfigitem('ui', 'interactive',
1161 default=None,
1164 default=None,
1162 )
1165 )
1163 coreconfigitem('ui', 'interface',
1166 coreconfigitem('ui', 'interface',
1164 default=None,
1167 default=None,
1165 )
1168 )
1166 coreconfigitem('ui', 'interface.chunkselector',
1169 coreconfigitem('ui', 'interface.chunkselector',
1167 default=None,
1170 default=None,
1168 )
1171 )
1169 coreconfigitem('ui', 'large-file-limit',
1172 coreconfigitem('ui', 'large-file-limit',
1170 default=10000000,
1173 default=10000000,
1171 )
1174 )
1172 coreconfigitem('ui', 'logblockedtimes',
1175 coreconfigitem('ui', 'logblockedtimes',
1173 default=False,
1176 default=False,
1174 )
1177 )
1175 coreconfigitem('ui', 'logtemplate',
1178 coreconfigitem('ui', 'logtemplate',
1176 default=None,
1179 default=None,
1177 )
1180 )
1178 coreconfigitem('ui', 'merge',
1181 coreconfigitem('ui', 'merge',
1179 default=None,
1182 default=None,
1180 )
1183 )
1181 coreconfigitem('ui', 'mergemarkers',
1184 coreconfigitem('ui', 'mergemarkers',
1182 default='basic',
1185 default='basic',
1183 )
1186 )
1184 coreconfigitem('ui', 'mergemarkertemplate',
1187 coreconfigitem('ui', 'mergemarkertemplate',
1185 default=('{node|short} '
1188 default=('{node|short} '
1186 '{ifeq(tags, "tip", "", '
1189 '{ifeq(tags, "tip", "", '
1187 'ifeq(tags, "", "", "{tags} "))}'
1190 'ifeq(tags, "", "", "{tags} "))}'
1188 '{if(bookmarks, "{bookmarks} ")}'
1191 '{if(bookmarks, "{bookmarks} ")}'
1189 '{ifeq(branch, "default", "", "{branch} ")}'
1192 '{ifeq(branch, "default", "", "{branch} ")}'
1190 '- {author|user}: {desc|firstline}')
1193 '- {author|user}: {desc|firstline}')
1191 )
1194 )
1192 coreconfigitem('ui', 'message-output',
1195 coreconfigitem('ui', 'message-output',
1193 default='stdio',
1196 default='stdio',
1194 )
1197 )
1195 coreconfigitem('ui', 'nontty',
1198 coreconfigitem('ui', 'nontty',
1196 default=False,
1199 default=False,
1197 )
1200 )
1198 coreconfigitem('ui', 'origbackuppath',
1201 coreconfigitem('ui', 'origbackuppath',
1199 default=None,
1202 default=None,
1200 )
1203 )
1201 coreconfigitem('ui', 'paginate',
1204 coreconfigitem('ui', 'paginate',
1202 default=True,
1205 default=True,
1203 )
1206 )
1204 coreconfigitem('ui', 'patch',
1207 coreconfigitem('ui', 'patch',
1205 default=None,
1208 default=None,
1206 )
1209 )
1207 coreconfigitem('ui', 'pre-merge-tool-output-template',
1210 coreconfigitem('ui', 'pre-merge-tool-output-template',
1208 default=None,
1211 default=None,
1209 )
1212 )
1210 coreconfigitem('ui', 'portablefilenames',
1213 coreconfigitem('ui', 'portablefilenames',
1211 default='warn',
1214 default='warn',
1212 )
1215 )
1213 coreconfigitem('ui', 'promptecho',
1216 coreconfigitem('ui', 'promptecho',
1214 default=False,
1217 default=False,
1215 )
1218 )
1216 coreconfigitem('ui', 'quiet',
1219 coreconfigitem('ui', 'quiet',
1217 default=False,
1220 default=False,
1218 )
1221 )
1219 coreconfigitem('ui', 'quietbookmarkmove',
1222 coreconfigitem('ui', 'quietbookmarkmove',
1220 default=False,
1223 default=False,
1221 )
1224 )
1222 coreconfigitem('ui', 'relative-paths',
1225 coreconfigitem('ui', 'relative-paths',
1223 default='legacy',
1226 default='legacy',
1224 )
1227 )
1225 coreconfigitem('ui', 'remotecmd',
1228 coreconfigitem('ui', 'remotecmd',
1226 default='hg',
1229 default='hg',
1227 )
1230 )
1228 coreconfigitem('ui', 'report_untrusted',
1231 coreconfigitem('ui', 'report_untrusted',
1229 default=True,
1232 default=True,
1230 )
1233 )
1231 coreconfigitem('ui', 'rollback',
1234 coreconfigitem('ui', 'rollback',
1232 default=True,
1235 default=True,
1233 )
1236 )
1234 coreconfigitem('ui', 'signal-safe-lock',
1237 coreconfigitem('ui', 'signal-safe-lock',
1235 default=True,
1238 default=True,
1236 )
1239 )
1237 coreconfigitem('ui', 'slash',
1240 coreconfigitem('ui', 'slash',
1238 default=False,
1241 default=False,
1239 )
1242 )
1240 coreconfigitem('ui', 'ssh',
1243 coreconfigitem('ui', 'ssh',
1241 default='ssh',
1244 default='ssh',
1242 )
1245 )
1243 coreconfigitem('ui', 'ssherrorhint',
1246 coreconfigitem('ui', 'ssherrorhint',
1244 default=None,
1247 default=None,
1245 )
1248 )
1246 coreconfigitem('ui', 'statuscopies',
1249 coreconfigitem('ui', 'statuscopies',
1247 default=False,
1250 default=False,
1248 )
1251 )
1249 coreconfigitem('ui', 'strict',
1252 coreconfigitem('ui', 'strict',
1250 default=False,
1253 default=False,
1251 )
1254 )
1252 coreconfigitem('ui', 'style',
1255 coreconfigitem('ui', 'style',
1253 default='',
1256 default='',
1254 )
1257 )
1255 coreconfigitem('ui', 'supportcontact',
1258 coreconfigitem('ui', 'supportcontact',
1256 default=None,
1259 default=None,
1257 )
1260 )
1258 coreconfigitem('ui', 'textwidth',
1261 coreconfigitem('ui', 'textwidth',
1259 default=78,
1262 default=78,
1260 )
1263 )
1261 coreconfigitem('ui', 'timeout',
1264 coreconfigitem('ui', 'timeout',
1262 default='600',
1265 default='600',
1263 )
1266 )
1264 coreconfigitem('ui', 'timeout.warn',
1267 coreconfigitem('ui', 'timeout.warn',
1265 default=0,
1268 default=0,
1266 )
1269 )
1267 coreconfigitem('ui', 'traceback',
1270 coreconfigitem('ui', 'traceback',
1268 default=False,
1271 default=False,
1269 )
1272 )
1270 coreconfigitem('ui', 'tweakdefaults',
1273 coreconfigitem('ui', 'tweakdefaults',
1271 default=False,
1274 default=False,
1272 )
1275 )
1273 coreconfigitem('ui', 'username',
1276 coreconfigitem('ui', 'username',
1274 alias=[('ui', 'user')]
1277 alias=[('ui', 'user')]
1275 )
1278 )
1276 coreconfigitem('ui', 'verbose',
1279 coreconfigitem('ui', 'verbose',
1277 default=False,
1280 default=False,
1278 )
1281 )
1279 coreconfigitem('verify', 'skipflags',
1282 coreconfigitem('verify', 'skipflags',
1280 default=None,
1283 default=None,
1281 )
1284 )
1282 coreconfigitem('web', 'allowbz2',
1285 coreconfigitem('web', 'allowbz2',
1283 default=False,
1286 default=False,
1284 )
1287 )
1285 coreconfigitem('web', 'allowgz',
1288 coreconfigitem('web', 'allowgz',
1286 default=False,
1289 default=False,
1287 )
1290 )
1288 coreconfigitem('web', 'allow-pull',
1291 coreconfigitem('web', 'allow-pull',
1289 alias=[('web', 'allowpull')],
1292 alias=[('web', 'allowpull')],
1290 default=True,
1293 default=True,
1291 )
1294 )
1292 coreconfigitem('web', 'allow-push',
1295 coreconfigitem('web', 'allow-push',
1293 alias=[('web', 'allow_push')],
1296 alias=[('web', 'allow_push')],
1294 default=list,
1297 default=list,
1295 )
1298 )
1296 coreconfigitem('web', 'allowzip',
1299 coreconfigitem('web', 'allowzip',
1297 default=False,
1300 default=False,
1298 )
1301 )
1299 coreconfigitem('web', 'archivesubrepos',
1302 coreconfigitem('web', 'archivesubrepos',
1300 default=False,
1303 default=False,
1301 )
1304 )
1302 coreconfigitem('web', 'cache',
1305 coreconfigitem('web', 'cache',
1303 default=True,
1306 default=True,
1304 )
1307 )
1305 coreconfigitem('web', 'comparisoncontext',
1308 coreconfigitem('web', 'comparisoncontext',
1306 default=5,
1309 default=5,
1307 )
1310 )
1308 coreconfigitem('web', 'contact',
1311 coreconfigitem('web', 'contact',
1309 default=None,
1312 default=None,
1310 )
1313 )
1311 coreconfigitem('web', 'deny_push',
1314 coreconfigitem('web', 'deny_push',
1312 default=list,
1315 default=list,
1313 )
1316 )
1314 coreconfigitem('web', 'guessmime',
1317 coreconfigitem('web', 'guessmime',
1315 default=False,
1318 default=False,
1316 )
1319 )
1317 coreconfigitem('web', 'hidden',
1320 coreconfigitem('web', 'hidden',
1318 default=False,
1321 default=False,
1319 )
1322 )
1320 coreconfigitem('web', 'labels',
1323 coreconfigitem('web', 'labels',
1321 default=list,
1324 default=list,
1322 )
1325 )
1323 coreconfigitem('web', 'logoimg',
1326 coreconfigitem('web', 'logoimg',
1324 default='hglogo.png',
1327 default='hglogo.png',
1325 )
1328 )
1326 coreconfigitem('web', 'logourl',
1329 coreconfigitem('web', 'logourl',
1327 default='https://mercurial-scm.org/',
1330 default='https://mercurial-scm.org/',
1328 )
1331 )
1329 coreconfigitem('web', 'accesslog',
1332 coreconfigitem('web', 'accesslog',
1330 default='-',
1333 default='-',
1331 )
1334 )
1332 coreconfigitem('web', 'address',
1335 coreconfigitem('web', 'address',
1333 default='',
1336 default='',
1334 )
1337 )
1335 coreconfigitem('web', 'allow-archive',
1338 coreconfigitem('web', 'allow-archive',
1336 alias=[('web', 'allow_archive')],
1339 alias=[('web', 'allow_archive')],
1337 default=list,
1340 default=list,
1338 )
1341 )
1339 coreconfigitem('web', 'allow_read',
1342 coreconfigitem('web', 'allow_read',
1340 default=list,
1343 default=list,
1341 )
1344 )
1342 coreconfigitem('web', 'baseurl',
1345 coreconfigitem('web', 'baseurl',
1343 default=None,
1346 default=None,
1344 )
1347 )
1345 coreconfigitem('web', 'cacerts',
1348 coreconfigitem('web', 'cacerts',
1346 default=None,
1349 default=None,
1347 )
1350 )
1348 coreconfigitem('web', 'certificate',
1351 coreconfigitem('web', 'certificate',
1349 default=None,
1352 default=None,
1350 )
1353 )
1351 coreconfigitem('web', 'collapse',
1354 coreconfigitem('web', 'collapse',
1352 default=False,
1355 default=False,
1353 )
1356 )
1354 coreconfigitem('web', 'csp',
1357 coreconfigitem('web', 'csp',
1355 default=None,
1358 default=None,
1356 )
1359 )
1357 coreconfigitem('web', 'deny_read',
1360 coreconfigitem('web', 'deny_read',
1358 default=list,
1361 default=list,
1359 )
1362 )
1360 coreconfigitem('web', 'descend',
1363 coreconfigitem('web', 'descend',
1361 default=True,
1364 default=True,
1362 )
1365 )
1363 coreconfigitem('web', 'description',
1366 coreconfigitem('web', 'description',
1364 default="",
1367 default="",
1365 )
1368 )
1366 coreconfigitem('web', 'encoding',
1369 coreconfigitem('web', 'encoding',
1367 default=lambda: encoding.encoding,
1370 default=lambda: encoding.encoding,
1368 )
1371 )
1369 coreconfigitem('web', 'errorlog',
1372 coreconfigitem('web', 'errorlog',
1370 default='-',
1373 default='-',
1371 )
1374 )
1372 coreconfigitem('web', 'ipv6',
1375 coreconfigitem('web', 'ipv6',
1373 default=False,
1376 default=False,
1374 )
1377 )
1375 coreconfigitem('web', 'maxchanges',
1378 coreconfigitem('web', 'maxchanges',
1376 default=10,
1379 default=10,
1377 )
1380 )
1378 coreconfigitem('web', 'maxfiles',
1381 coreconfigitem('web', 'maxfiles',
1379 default=10,
1382 default=10,
1380 )
1383 )
1381 coreconfigitem('web', 'maxshortchanges',
1384 coreconfigitem('web', 'maxshortchanges',
1382 default=60,
1385 default=60,
1383 )
1386 )
1384 coreconfigitem('web', 'motd',
1387 coreconfigitem('web', 'motd',
1385 default='',
1388 default='',
1386 )
1389 )
1387 coreconfigitem('web', 'name',
1390 coreconfigitem('web', 'name',
1388 default=dynamicdefault,
1391 default=dynamicdefault,
1389 )
1392 )
1390 coreconfigitem('web', 'port',
1393 coreconfigitem('web', 'port',
1391 default=8000,
1394 default=8000,
1392 )
1395 )
1393 coreconfigitem('web', 'prefix',
1396 coreconfigitem('web', 'prefix',
1394 default='',
1397 default='',
1395 )
1398 )
1396 coreconfigitem('web', 'push_ssl',
1399 coreconfigitem('web', 'push_ssl',
1397 default=True,
1400 default=True,
1398 )
1401 )
1399 coreconfigitem('web', 'refreshinterval',
1402 coreconfigitem('web', 'refreshinterval',
1400 default=20,
1403 default=20,
1401 )
1404 )
1402 coreconfigitem('web', 'server-header',
1405 coreconfigitem('web', 'server-header',
1403 default=None,
1406 default=None,
1404 )
1407 )
1405 coreconfigitem('web', 'static',
1408 coreconfigitem('web', 'static',
1406 default=None,
1409 default=None,
1407 )
1410 )
1408 coreconfigitem('web', 'staticurl',
1411 coreconfigitem('web', 'staticurl',
1409 default=None,
1412 default=None,
1410 )
1413 )
1411 coreconfigitem('web', 'stripes',
1414 coreconfigitem('web', 'stripes',
1412 default=1,
1415 default=1,
1413 )
1416 )
1414 coreconfigitem('web', 'style',
1417 coreconfigitem('web', 'style',
1415 default='paper',
1418 default='paper',
1416 )
1419 )
1417 coreconfigitem('web', 'templates',
1420 coreconfigitem('web', 'templates',
1418 default=None,
1421 default=None,
1419 )
1422 )
1420 coreconfigitem('web', 'view',
1423 coreconfigitem('web', 'view',
1421 default='served',
1424 default='served',
1422 )
1425 )
1423 coreconfigitem('worker', 'backgroundclose',
1426 coreconfigitem('worker', 'backgroundclose',
1424 default=dynamicdefault,
1427 default=dynamicdefault,
1425 )
1428 )
1426 # Windows defaults to a limit of 512 open files. A buffer of 128
1429 # Windows defaults to a limit of 512 open files. A buffer of 128
1427 # should give us enough headway.
1430 # should give us enough headway.
1428 coreconfigitem('worker', 'backgroundclosemaxqueue',
1431 coreconfigitem('worker', 'backgroundclosemaxqueue',
1429 default=384,
1432 default=384,
1430 )
1433 )
1431 coreconfigitem('worker', 'backgroundcloseminfilecount',
1434 coreconfigitem('worker', 'backgroundcloseminfilecount',
1432 default=2048,
1435 default=2048,
1433 )
1436 )
1434 coreconfigitem('worker', 'backgroundclosethreadcount',
1437 coreconfigitem('worker', 'backgroundclosethreadcount',
1435 default=4,
1438 default=4,
1436 )
1439 )
1437 coreconfigitem('worker', 'enabled',
1440 coreconfigitem('worker', 'enabled',
1438 default=True,
1441 default=True,
1439 )
1442 )
1440 coreconfigitem('worker', 'numcpus',
1443 coreconfigitem('worker', 'numcpus',
1441 default=None,
1444 default=None,
1442 )
1445 )
1443
1446
1444 # Rebase related configuration moved to core because other extension are doing
1447 # Rebase related configuration moved to core because other extension are doing
1445 # strange things. For example, shelve import the extensions to reuse some bit
1448 # strange things. For example, shelve import the extensions to reuse some bit
1446 # without formally loading it.
1449 # without formally loading it.
1447 coreconfigitem('commands', 'rebase.requiredest',
1450 coreconfigitem('commands', 'rebase.requiredest',
1448 default=False,
1451 default=False,
1449 )
1452 )
1450 coreconfigitem('experimental', 'rebaseskipobsolete',
1453 coreconfigitem('experimental', 'rebaseskipobsolete',
1451 default=True,
1454 default=True,
1452 )
1455 )
1453 coreconfigitem('rebase', 'singletransaction',
1456 coreconfigitem('rebase', 'singletransaction',
1454 default=False,
1457 default=False,
1455 )
1458 )
1456 coreconfigitem('rebase', 'experimental.inmemory',
1459 coreconfigitem('rebase', 'experimental.inmemory',
1457 default=False,
1460 default=False,
1458 )
1461 )
@@ -1,2801 +1,2817 b''
1 The Mercurial system uses a set of configuration files to control
1 The Mercurial system uses a set of configuration files to control
2 aspects of its behavior.
2 aspects of its behavior.
3
3
4 Troubleshooting
4 Troubleshooting
5 ===============
5 ===============
6
6
7 If you're having problems with your configuration,
7 If you're having problems with your configuration,
8 :hg:`config --debug` can help you understand what is introducing
8 :hg:`config --debug` can help you understand what is introducing
9 a setting into your environment.
9 a setting into your environment.
10
10
11 See :hg:`help config.syntax` and :hg:`help config.files`
11 See :hg:`help config.syntax` and :hg:`help config.files`
12 for information about how and where to override things.
12 for information about how and where to override things.
13
13
14 Structure
14 Structure
15 =========
15 =========
16
16
17 The configuration files use a simple ini-file format. A configuration
17 The configuration files use a simple ini-file format. A configuration
18 file consists of sections, led by a ``[section]`` header and followed
18 file consists of sections, led by a ``[section]`` header and followed
19 by ``name = value`` entries::
19 by ``name = value`` entries::
20
20
21 [ui]
21 [ui]
22 username = Firstname Lastname <firstname.lastname@example.net>
22 username = Firstname Lastname <firstname.lastname@example.net>
23 verbose = True
23 verbose = True
24
24
25 The above entries will be referred to as ``ui.username`` and
25 The above entries will be referred to as ``ui.username`` and
26 ``ui.verbose``, respectively. See :hg:`help config.syntax`.
26 ``ui.verbose``, respectively. See :hg:`help config.syntax`.
27
27
28 Files
28 Files
29 =====
29 =====
30
30
31 Mercurial reads configuration data from several files, if they exist.
31 Mercurial reads configuration data from several files, if they exist.
32 These files do not exist by default and you will have to create the
32 These files do not exist by default and you will have to create the
33 appropriate configuration files yourself:
33 appropriate configuration files yourself:
34
34
35 Local configuration is put into the per-repository ``<repo>/.hg/hgrc`` file.
35 Local configuration is put into the per-repository ``<repo>/.hg/hgrc`` file.
36
36
37 Global configuration like the username setting is typically put into:
37 Global configuration like the username setting is typically put into:
38
38
39 .. container:: windows
39 .. container:: windows
40
40
41 - ``%USERPROFILE%\mercurial.ini`` (on Windows)
41 - ``%USERPROFILE%\mercurial.ini`` (on Windows)
42
42
43 .. container:: unix.plan9
43 .. container:: unix.plan9
44
44
45 - ``$HOME/.hgrc`` (on Unix, Plan9)
45 - ``$HOME/.hgrc`` (on Unix, Plan9)
46
46
47 The names of these files depend on the system on which Mercurial is
47 The names of these files depend on the system on which Mercurial is
48 installed. ``*.rc`` files from a single directory are read in
48 installed. ``*.rc`` files from a single directory are read in
49 alphabetical order, later ones overriding earlier ones. Where multiple
49 alphabetical order, later ones overriding earlier ones. Where multiple
50 paths are given below, settings from earlier paths override later
50 paths are given below, settings from earlier paths override later
51 ones.
51 ones.
52
52
53 .. container:: verbose.unix
53 .. container:: verbose.unix
54
54
55 On Unix, the following files are consulted:
55 On Unix, the following files are consulted:
56
56
57 - ``<repo>/.hg/hgrc`` (per-repository)
57 - ``<repo>/.hg/hgrc`` (per-repository)
58 - ``$HOME/.hgrc`` (per-user)
58 - ``$HOME/.hgrc`` (per-user)
59 - ``${XDG_CONFIG_HOME:-$HOME/.config}/hg/hgrc`` (per-user)
59 - ``${XDG_CONFIG_HOME:-$HOME/.config}/hg/hgrc`` (per-user)
60 - ``<install-root>/etc/mercurial/hgrc`` (per-installation)
60 - ``<install-root>/etc/mercurial/hgrc`` (per-installation)
61 - ``<install-root>/etc/mercurial/hgrc.d/*.rc`` (per-installation)
61 - ``<install-root>/etc/mercurial/hgrc.d/*.rc`` (per-installation)
62 - ``/etc/mercurial/hgrc`` (per-system)
62 - ``/etc/mercurial/hgrc`` (per-system)
63 - ``/etc/mercurial/hgrc.d/*.rc`` (per-system)
63 - ``/etc/mercurial/hgrc.d/*.rc`` (per-system)
64 - ``<internal>/default.d/*.rc`` (defaults)
64 - ``<internal>/default.d/*.rc`` (defaults)
65
65
66 .. container:: verbose.windows
66 .. container:: verbose.windows
67
67
68 On Windows, the following files are consulted:
68 On Windows, the following files are consulted:
69
69
70 - ``<repo>/.hg/hgrc`` (per-repository)
70 - ``<repo>/.hg/hgrc`` (per-repository)
71 - ``%USERPROFILE%\.hgrc`` (per-user)
71 - ``%USERPROFILE%\.hgrc`` (per-user)
72 - ``%USERPROFILE%\Mercurial.ini`` (per-user)
72 - ``%USERPROFILE%\Mercurial.ini`` (per-user)
73 - ``%HOME%\.hgrc`` (per-user)
73 - ``%HOME%\.hgrc`` (per-user)
74 - ``%HOME%\Mercurial.ini`` (per-user)
74 - ``%HOME%\Mercurial.ini`` (per-user)
75 - ``HKEY_LOCAL_MACHINE\SOFTWARE\Mercurial`` (per-installation)
75 - ``HKEY_LOCAL_MACHINE\SOFTWARE\Mercurial`` (per-installation)
76 - ``<install-dir>\hgrc.d\*.rc`` (per-installation)
76 - ``<install-dir>\hgrc.d\*.rc`` (per-installation)
77 - ``<install-dir>\Mercurial.ini`` (per-installation)
77 - ``<install-dir>\Mercurial.ini`` (per-installation)
78 - ``<internal>/default.d/*.rc`` (defaults)
78 - ``<internal>/default.d/*.rc`` (defaults)
79
79
80 .. note::
80 .. note::
81
81
82 The registry key ``HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Mercurial``
82 The registry key ``HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Mercurial``
83 is used when running 32-bit Python on 64-bit Windows.
83 is used when running 32-bit Python on 64-bit Windows.
84
84
85 .. container:: windows
85 .. container:: windows
86
86
87 On Windows 9x, ``%HOME%`` is replaced by ``%APPDATA%``.
87 On Windows 9x, ``%HOME%`` is replaced by ``%APPDATA%``.
88
88
89 .. container:: verbose.plan9
89 .. container:: verbose.plan9
90
90
91 On Plan9, the following files are consulted:
91 On Plan9, the following files are consulted:
92
92
93 - ``<repo>/.hg/hgrc`` (per-repository)
93 - ``<repo>/.hg/hgrc`` (per-repository)
94 - ``$home/lib/hgrc`` (per-user)
94 - ``$home/lib/hgrc`` (per-user)
95 - ``<install-root>/lib/mercurial/hgrc`` (per-installation)
95 - ``<install-root>/lib/mercurial/hgrc`` (per-installation)
96 - ``<install-root>/lib/mercurial/hgrc.d/*.rc`` (per-installation)
96 - ``<install-root>/lib/mercurial/hgrc.d/*.rc`` (per-installation)
97 - ``/lib/mercurial/hgrc`` (per-system)
97 - ``/lib/mercurial/hgrc`` (per-system)
98 - ``/lib/mercurial/hgrc.d/*.rc`` (per-system)
98 - ``/lib/mercurial/hgrc.d/*.rc`` (per-system)
99 - ``<internal>/default.d/*.rc`` (defaults)
99 - ``<internal>/default.d/*.rc`` (defaults)
100
100
101 Per-repository configuration options only apply in a
101 Per-repository configuration options only apply in a
102 particular repository. This file is not version-controlled, and
102 particular repository. This file is not version-controlled, and
103 will not get transferred during a "clone" operation. Options in
103 will not get transferred during a "clone" operation. Options in
104 this file override options in all other configuration files.
104 this file override options in all other configuration files.
105
105
106 .. container:: unix.plan9
106 .. container:: unix.plan9
107
107
108 On Plan 9 and Unix, most of this file will be ignored if it doesn't
108 On Plan 9 and Unix, most of this file will be ignored if it doesn't
109 belong to a trusted user or to a trusted group. See
109 belong to a trusted user or to a trusted group. See
110 :hg:`help config.trusted` for more details.
110 :hg:`help config.trusted` for more details.
111
111
112 Per-user configuration file(s) are for the user running Mercurial. Options
112 Per-user configuration file(s) are for the user running Mercurial. Options
113 in these files apply to all Mercurial commands executed by this user in any
113 in these files apply to all Mercurial commands executed by this user in any
114 directory. Options in these files override per-system and per-installation
114 directory. Options in these files override per-system and per-installation
115 options.
115 options.
116
116
117 Per-installation configuration files are searched for in the
117 Per-installation configuration files are searched for in the
118 directory where Mercurial is installed. ``<install-root>`` is the
118 directory where Mercurial is installed. ``<install-root>`` is the
119 parent directory of the **hg** executable (or symlink) being run.
119 parent directory of the **hg** executable (or symlink) being run.
120
120
121 .. container:: unix.plan9
121 .. container:: unix.plan9
122
122
123 For example, if installed in ``/shared/tools/bin/hg``, Mercurial
123 For example, if installed in ``/shared/tools/bin/hg``, Mercurial
124 will look in ``/shared/tools/etc/mercurial/hgrc``. Options in these
124 will look in ``/shared/tools/etc/mercurial/hgrc``. Options in these
125 files apply to all Mercurial commands executed by any user in any
125 files apply to all Mercurial commands executed by any user in any
126 directory.
126 directory.
127
127
128 Per-installation configuration files are for the system on
128 Per-installation configuration files are for the system on
129 which Mercurial is running. Options in these files apply to all
129 which Mercurial is running. Options in these files apply to all
130 Mercurial commands executed by any user in any directory. Registry
130 Mercurial commands executed by any user in any directory. Registry
131 keys contain PATH-like strings, every part of which must reference
131 keys contain PATH-like strings, every part of which must reference
132 a ``Mercurial.ini`` file or be a directory where ``*.rc`` files will
132 a ``Mercurial.ini`` file or be a directory where ``*.rc`` files will
133 be read. Mercurial checks each of these locations in the specified
133 be read. Mercurial checks each of these locations in the specified
134 order until one or more configuration files are detected.
134 order until one or more configuration files are detected.
135
135
136 Per-system configuration files are for the system on which Mercurial
136 Per-system configuration files are for the system on which Mercurial
137 is running. Options in these files apply to all Mercurial commands
137 is running. Options in these files apply to all Mercurial commands
138 executed by any user in any directory. Options in these files
138 executed by any user in any directory. Options in these files
139 override per-installation options.
139 override per-installation options.
140
140
141 Mercurial comes with some default configuration. The default configuration
141 Mercurial comes with some default configuration. The default configuration
142 files are installed with Mercurial and will be overwritten on upgrades. Default
142 files are installed with Mercurial and will be overwritten on upgrades. Default
143 configuration files should never be edited by users or administrators but can
143 configuration files should never be edited by users or administrators but can
144 be overridden in other configuration files. So far the directory only contains
144 be overridden in other configuration files. So far the directory only contains
145 merge tool configuration but packagers can also put other default configuration
145 merge tool configuration but packagers can also put other default configuration
146 there.
146 there.
147
147
148 Syntax
148 Syntax
149 ======
149 ======
150
150
151 A configuration file consists of sections, led by a ``[section]`` header
151 A configuration file consists of sections, led by a ``[section]`` header
152 and followed by ``name = value`` entries (sometimes called
152 and followed by ``name = value`` entries (sometimes called
153 ``configuration keys``)::
153 ``configuration keys``)::
154
154
155 [spam]
155 [spam]
156 eggs=ham
156 eggs=ham
157 green=
157 green=
158 eggs
158 eggs
159
159
160 Each line contains one entry. If the lines that follow are indented,
160 Each line contains one entry. If the lines that follow are indented,
161 they are treated as continuations of that entry. Leading whitespace is
161 they are treated as continuations of that entry. Leading whitespace is
162 removed from values. Empty lines are skipped. Lines beginning with
162 removed from values. Empty lines are skipped. Lines beginning with
163 ``#`` or ``;`` are ignored and may be used to provide comments.
163 ``#`` or ``;`` are ignored and may be used to provide comments.
164
164
165 Configuration keys can be set multiple times, in which case Mercurial
165 Configuration keys can be set multiple times, in which case Mercurial
166 will use the value that was configured last. As an example::
166 will use the value that was configured last. As an example::
167
167
168 [spam]
168 [spam]
169 eggs=large
169 eggs=large
170 ham=serrano
170 ham=serrano
171 eggs=small
171 eggs=small
172
172
173 This would set the configuration key named ``eggs`` to ``small``.
173 This would set the configuration key named ``eggs`` to ``small``.
174
174
175 It is also possible to define a section multiple times. A section can
175 It is also possible to define a section multiple times. A section can
176 be redefined on the same and/or on different configuration files. For
176 be redefined on the same and/or on different configuration files. For
177 example::
177 example::
178
178
179 [foo]
179 [foo]
180 eggs=large
180 eggs=large
181 ham=serrano
181 ham=serrano
182 eggs=small
182 eggs=small
183
183
184 [bar]
184 [bar]
185 eggs=ham
185 eggs=ham
186 green=
186 green=
187 eggs
187 eggs
188
188
189 [foo]
189 [foo]
190 ham=prosciutto
190 ham=prosciutto
191 eggs=medium
191 eggs=medium
192 bread=toasted
192 bread=toasted
193
193
194 This would set the ``eggs``, ``ham``, and ``bread`` configuration keys
194 This would set the ``eggs``, ``ham``, and ``bread`` configuration keys
195 of the ``foo`` section to ``medium``, ``prosciutto``, and ``toasted``,
195 of the ``foo`` section to ``medium``, ``prosciutto``, and ``toasted``,
196 respectively. As you can see there only thing that matters is the last
196 respectively. As you can see there only thing that matters is the last
197 value that was set for each of the configuration keys.
197 value that was set for each of the configuration keys.
198
198
199 If a configuration key is set multiple times in different
199 If a configuration key is set multiple times in different
200 configuration files the final value will depend on the order in which
200 configuration files the final value will depend on the order in which
201 the different configuration files are read, with settings from earlier
201 the different configuration files are read, with settings from earlier
202 paths overriding later ones as described on the ``Files`` section
202 paths overriding later ones as described on the ``Files`` section
203 above.
203 above.
204
204
205 A line of the form ``%include file`` will include ``file`` into the
205 A line of the form ``%include file`` will include ``file`` into the
206 current configuration file. The inclusion is recursive, which means
206 current configuration file. The inclusion is recursive, which means
207 that included files can include other files. Filenames are relative to
207 that included files can include other files. Filenames are relative to
208 the configuration file in which the ``%include`` directive is found.
208 the configuration file in which the ``%include`` directive is found.
209 Environment variables and ``~user`` constructs are expanded in
209 Environment variables and ``~user`` constructs are expanded in
210 ``file``. This lets you do something like::
210 ``file``. This lets you do something like::
211
211
212 %include ~/.hgrc.d/$HOST.rc
212 %include ~/.hgrc.d/$HOST.rc
213
213
214 to include a different configuration file on each computer you use.
214 to include a different configuration file on each computer you use.
215
215
216 A line with ``%unset name`` will remove ``name`` from the current
216 A line with ``%unset name`` will remove ``name`` from the current
217 section, if it has been set previously.
217 section, if it has been set previously.
218
218
219 The values are either free-form text strings, lists of text strings,
219 The values are either free-form text strings, lists of text strings,
220 or Boolean values. Boolean values can be set to true using any of "1",
220 or Boolean values. Boolean values can be set to true using any of "1",
221 "yes", "true", or "on" and to false using "0", "no", "false", or "off"
221 "yes", "true", or "on" and to false using "0", "no", "false", or "off"
222 (all case insensitive).
222 (all case insensitive).
223
223
224 List values are separated by whitespace or comma, except when values are
224 List values are separated by whitespace or comma, except when values are
225 placed in double quotation marks::
225 placed in double quotation marks::
226
226
227 allow_read = "John Doe, PhD", brian, betty
227 allow_read = "John Doe, PhD", brian, betty
228
228
229 Quotation marks can be escaped by prefixing them with a backslash. Only
229 Quotation marks can be escaped by prefixing them with a backslash. Only
230 quotation marks at the beginning of a word is counted as a quotation
230 quotation marks at the beginning of a word is counted as a quotation
231 (e.g., ``foo"bar baz`` is the list of ``foo"bar`` and ``baz``).
231 (e.g., ``foo"bar baz`` is the list of ``foo"bar`` and ``baz``).
232
232
233 Sections
233 Sections
234 ========
234 ========
235
235
236 This section describes the different sections that may appear in a
236 This section describes the different sections that may appear in a
237 Mercurial configuration file, the purpose of each section, its possible
237 Mercurial configuration file, the purpose of each section, its possible
238 keys, and their possible values.
238 keys, and their possible values.
239
239
240 ``alias``
240 ``alias``
241 ---------
241 ---------
242
242
243 Defines command aliases.
243 Defines command aliases.
244
244
245 Aliases allow you to define your own commands in terms of other
245 Aliases allow you to define your own commands in terms of other
246 commands (or aliases), optionally including arguments. Positional
246 commands (or aliases), optionally including arguments. Positional
247 arguments in the form of ``$1``, ``$2``, etc. in the alias definition
247 arguments in the form of ``$1``, ``$2``, etc. in the alias definition
248 are expanded by Mercurial before execution. Positional arguments not
248 are expanded by Mercurial before execution. Positional arguments not
249 already used by ``$N`` in the definition are put at the end of the
249 already used by ``$N`` in the definition are put at the end of the
250 command to be executed.
250 command to be executed.
251
251
252 Alias definitions consist of lines of the form::
252 Alias definitions consist of lines of the form::
253
253
254 <alias> = <command> [<argument>]...
254 <alias> = <command> [<argument>]...
255
255
256 For example, this definition::
256 For example, this definition::
257
257
258 latest = log --limit 5
258 latest = log --limit 5
259
259
260 creates a new command ``latest`` that shows only the five most recent
260 creates a new command ``latest`` that shows only the five most recent
261 changesets. You can define subsequent aliases using earlier ones::
261 changesets. You can define subsequent aliases using earlier ones::
262
262
263 stable5 = latest -b stable
263 stable5 = latest -b stable
264
264
265 .. note::
265 .. note::
266
266
267 It is possible to create aliases with the same names as
267 It is possible to create aliases with the same names as
268 existing commands, which will then override the original
268 existing commands, which will then override the original
269 definitions. This is almost always a bad idea!
269 definitions. This is almost always a bad idea!
270
270
271 An alias can start with an exclamation point (``!``) to make it a
271 An alias can start with an exclamation point (``!``) to make it a
272 shell alias. A shell alias is executed with the shell and will let you
272 shell alias. A shell alias is executed with the shell and will let you
273 run arbitrary commands. As an example, ::
273 run arbitrary commands. As an example, ::
274
274
275 echo = !echo $@
275 echo = !echo $@
276
276
277 will let you do ``hg echo foo`` to have ``foo`` printed in your
277 will let you do ``hg echo foo`` to have ``foo`` printed in your
278 terminal. A better example might be::
278 terminal. A better example might be::
279
279
280 purge = !$HG status --no-status --unknown -0 re: | xargs -0 rm -f
280 purge = !$HG status --no-status --unknown -0 re: | xargs -0 rm -f
281
281
282 which will make ``hg purge`` delete all unknown files in the
282 which will make ``hg purge`` delete all unknown files in the
283 repository in the same manner as the purge extension.
283 repository in the same manner as the purge extension.
284
284
285 Positional arguments like ``$1``, ``$2``, etc. in the alias definition
285 Positional arguments like ``$1``, ``$2``, etc. in the alias definition
286 expand to the command arguments. Unmatched arguments are
286 expand to the command arguments. Unmatched arguments are
287 removed. ``$0`` expands to the alias name and ``$@`` expands to all
287 removed. ``$0`` expands to the alias name and ``$@`` expands to all
288 arguments separated by a space. ``"$@"`` (with quotes) expands to all
288 arguments separated by a space. ``"$@"`` (with quotes) expands to all
289 arguments quoted individually and separated by a space. These expansions
289 arguments quoted individually and separated by a space. These expansions
290 happen before the command is passed to the shell.
290 happen before the command is passed to the shell.
291
291
292 Shell aliases are executed in an environment where ``$HG`` expands to
292 Shell aliases are executed in an environment where ``$HG`` expands to
293 the path of the Mercurial that was used to execute the alias. This is
293 the path of the Mercurial that was used to execute the alias. This is
294 useful when you want to call further Mercurial commands in a shell
294 useful when you want to call further Mercurial commands in a shell
295 alias, as was done above for the purge alias. In addition,
295 alias, as was done above for the purge alias. In addition,
296 ``$HG_ARGS`` expands to the arguments given to Mercurial. In the ``hg
296 ``$HG_ARGS`` expands to the arguments given to Mercurial. In the ``hg
297 echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``.
297 echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``.
298
298
299 .. note::
299 .. note::
300
300
301 Some global configuration options such as ``-R`` are
301 Some global configuration options such as ``-R`` are
302 processed before shell aliases and will thus not be passed to
302 processed before shell aliases and will thus not be passed to
303 aliases.
303 aliases.
304
304
305
305
306 ``annotate``
306 ``annotate``
307 ------------
307 ------------
308
308
309 Settings used when displaying file annotations. All values are
309 Settings used when displaying file annotations. All values are
310 Booleans and default to False. See :hg:`help config.diff` for
310 Booleans and default to False. See :hg:`help config.diff` for
311 related options for the diff command.
311 related options for the diff command.
312
312
313 ``ignorews``
313 ``ignorews``
314 Ignore white space when comparing lines.
314 Ignore white space when comparing lines.
315
315
316 ``ignorewseol``
316 ``ignorewseol``
317 Ignore white space at the end of a line when comparing lines.
317 Ignore white space at the end of a line when comparing lines.
318
318
319 ``ignorewsamount``
319 ``ignorewsamount``
320 Ignore changes in the amount of white space.
320 Ignore changes in the amount of white space.
321
321
322 ``ignoreblanklines``
322 ``ignoreblanklines``
323 Ignore changes whose lines are all blank.
323 Ignore changes whose lines are all blank.
324
324
325
325
326 ``auth``
326 ``auth``
327 --------
327 --------
328
328
329 Authentication credentials and other authentication-like configuration
329 Authentication credentials and other authentication-like configuration
330 for HTTP connections. This section allows you to store usernames and
330 for HTTP connections. This section allows you to store usernames and
331 passwords for use when logging *into* HTTP servers. See
331 passwords for use when logging *into* HTTP servers. See
332 :hg:`help config.web` if you want to configure *who* can login to
332 :hg:`help config.web` if you want to configure *who* can login to
333 your HTTP server.
333 your HTTP server.
334
334
335 The following options apply to all hosts.
335 The following options apply to all hosts.
336
336
337 ``cookiefile``
337 ``cookiefile``
338 Path to a file containing HTTP cookie lines. Cookies matching a
338 Path to a file containing HTTP cookie lines. Cookies matching a
339 host will be sent automatically.
339 host will be sent automatically.
340
340
341 The file format uses the Mozilla cookies.txt format, which defines cookies
341 The file format uses the Mozilla cookies.txt format, which defines cookies
342 on their own lines. Each line contains 7 fields delimited by the tab
342 on their own lines. Each line contains 7 fields delimited by the tab
343 character (domain, is_domain_cookie, path, is_secure, expires, name,
343 character (domain, is_domain_cookie, path, is_secure, expires, name,
344 value). For more info, do an Internet search for "Netscape cookies.txt
344 value). For more info, do an Internet search for "Netscape cookies.txt
345 format."
345 format."
346
346
347 Note: the cookies parser does not handle port numbers on domains. You
347 Note: the cookies parser does not handle port numbers on domains. You
348 will need to remove ports from the domain for the cookie to be recognized.
348 will need to remove ports from the domain for the cookie to be recognized.
349 This could result in a cookie being disclosed to an unwanted server.
349 This could result in a cookie being disclosed to an unwanted server.
350
350
351 The cookies file is read-only.
351 The cookies file is read-only.
352
352
353 Other options in this section are grouped by name and have the following
353 Other options in this section are grouped by name and have the following
354 format::
354 format::
355
355
356 <name>.<argument> = <value>
356 <name>.<argument> = <value>
357
357
358 where ``<name>`` is used to group arguments into authentication
358 where ``<name>`` is used to group arguments into authentication
359 entries. Example::
359 entries. Example::
360
360
361 foo.prefix = hg.intevation.de/mercurial
361 foo.prefix = hg.intevation.de/mercurial
362 foo.username = foo
362 foo.username = foo
363 foo.password = bar
363 foo.password = bar
364 foo.schemes = http https
364 foo.schemes = http https
365
365
366 bar.prefix = secure.example.org
366 bar.prefix = secure.example.org
367 bar.key = path/to/file.key
367 bar.key = path/to/file.key
368 bar.cert = path/to/file.cert
368 bar.cert = path/to/file.cert
369 bar.schemes = https
369 bar.schemes = https
370
370
371 Supported arguments:
371 Supported arguments:
372
372
373 ``prefix``
373 ``prefix``
374 Either ``*`` or a URI prefix with or without the scheme part.
374 Either ``*`` or a URI prefix with or without the scheme part.
375 The authentication entry with the longest matching prefix is used
375 The authentication entry with the longest matching prefix is used
376 (where ``*`` matches everything and counts as a match of length
376 (where ``*`` matches everything and counts as a match of length
377 1). If the prefix doesn't include a scheme, the match is performed
377 1). If the prefix doesn't include a scheme, the match is performed
378 against the URI with its scheme stripped as well, and the schemes
378 against the URI with its scheme stripped as well, and the schemes
379 argument, q.v., is then subsequently consulted.
379 argument, q.v., is then subsequently consulted.
380
380
381 ``username``
381 ``username``
382 Optional. Username to authenticate with. If not given, and the
382 Optional. Username to authenticate with. If not given, and the
383 remote site requires basic or digest authentication, the user will
383 remote site requires basic or digest authentication, the user will
384 be prompted for it. Environment variables are expanded in the
384 be prompted for it. Environment variables are expanded in the
385 username letting you do ``foo.username = $USER``. If the URI
385 username letting you do ``foo.username = $USER``. If the URI
386 includes a username, only ``[auth]`` entries with a matching
386 includes a username, only ``[auth]`` entries with a matching
387 username or without a username will be considered.
387 username or without a username will be considered.
388
388
389 ``password``
389 ``password``
390 Optional. Password to authenticate with. If not given, and the
390 Optional. Password to authenticate with. If not given, and the
391 remote site requires basic or digest authentication, the user
391 remote site requires basic or digest authentication, the user
392 will be prompted for it.
392 will be prompted for it.
393
393
394 ``key``
394 ``key``
395 Optional. PEM encoded client certificate key file. Environment
395 Optional. PEM encoded client certificate key file. Environment
396 variables are expanded in the filename.
396 variables are expanded in the filename.
397
397
398 ``cert``
398 ``cert``
399 Optional. PEM encoded client certificate chain file. Environment
399 Optional. PEM encoded client certificate chain file. Environment
400 variables are expanded in the filename.
400 variables are expanded in the filename.
401
401
402 ``schemes``
402 ``schemes``
403 Optional. Space separated list of URI schemes to use this
403 Optional. Space separated list of URI schemes to use this
404 authentication entry with. Only used if the prefix doesn't include
404 authentication entry with. Only used if the prefix doesn't include
405 a scheme. Supported schemes are http and https. They will match
405 a scheme. Supported schemes are http and https. They will match
406 static-http and static-https respectively, as well.
406 static-http and static-https respectively, as well.
407 (default: https)
407 (default: https)
408
408
409 If no suitable authentication entry is found, the user is prompted
409 If no suitable authentication entry is found, the user is prompted
410 for credentials as usual if required by the remote.
410 for credentials as usual if required by the remote.
411
411
412 ``color``
412 ``color``
413 ---------
413 ---------
414
414
415 Configure the Mercurial color mode. For details about how to define your custom
415 Configure the Mercurial color mode. For details about how to define your custom
416 effect and style see :hg:`help color`.
416 effect and style see :hg:`help color`.
417
417
418 ``mode``
418 ``mode``
419 String: control the method used to output color. One of ``auto``, ``ansi``,
419 String: control the method used to output color. One of ``auto``, ``ansi``,
420 ``win32``, ``terminfo`` or ``debug``. In auto mode, Mercurial will
420 ``win32``, ``terminfo`` or ``debug``. In auto mode, Mercurial will
421 use ANSI mode by default (or win32 mode prior to Windows 10) if it detects a
421 use ANSI mode by default (or win32 mode prior to Windows 10) if it detects a
422 terminal. Any invalid value will disable color.
422 terminal. Any invalid value will disable color.
423
423
424 ``pagermode``
424 ``pagermode``
425 String: optional override of ``color.mode`` used with pager.
425 String: optional override of ``color.mode`` used with pager.
426
426
427 On some systems, terminfo mode may cause problems when using
427 On some systems, terminfo mode may cause problems when using
428 color with ``less -R`` as a pager program. less with the -R option
428 color with ``less -R`` as a pager program. less with the -R option
429 will only display ECMA-48 color codes, and terminfo mode may sometimes
429 will only display ECMA-48 color codes, and terminfo mode may sometimes
430 emit codes that less doesn't understand. You can work around this by
430 emit codes that less doesn't understand. You can work around this by
431 either using ansi mode (or auto mode), or by using less -r (which will
431 either using ansi mode (or auto mode), or by using less -r (which will
432 pass through all terminal control codes, not just color control
432 pass through all terminal control codes, not just color control
433 codes).
433 codes).
434
434
435 On some systems (such as MSYS in Windows), the terminal may support
435 On some systems (such as MSYS in Windows), the terminal may support
436 a different color mode than the pager program.
436 a different color mode than the pager program.
437
437
438 ``commands``
438 ``commands``
439 ------------
439 ------------
440
440
441 ``resolve.confirm``
441 ``resolve.confirm``
442 Confirm before performing action if no filename is passed.
442 Confirm before performing action if no filename is passed.
443 (default: False)
443 (default: False)
444
444
445 ``resolve.explicit-re-merge``
445 ``resolve.explicit-re-merge``
446 Require uses of ``hg resolve`` to specify which action it should perform,
446 Require uses of ``hg resolve`` to specify which action it should perform,
447 instead of re-merging files by default.
447 instead of re-merging files by default.
448 (default: False)
448 (default: False)
449
449
450 ``resolve.mark-check``
450 ``resolve.mark-check``
451 Determines what level of checking :hg:`resolve --mark` will perform before
451 Determines what level of checking :hg:`resolve --mark` will perform before
 452 marking files as resolved. Valid values are ``none``, ``warn``, and
 452 marking files as resolved. Valid values are ``none``, ``warn``, and
453 ``abort``. ``warn`` will output a warning listing the file(s) that still
453 ``abort``. ``warn`` will output a warning listing the file(s) that still
454 have conflict markers in them, but will still mark everything resolved.
454 have conflict markers in them, but will still mark everything resolved.
455 ``abort`` will output the same warning but will not mark things as resolved.
455 ``abort`` will output the same warning but will not mark things as resolved.
456 If --all is passed and this is set to ``abort``, only a warning will be
456 If --all is passed and this is set to ``abort``, only a warning will be
457 shown (an error will not be raised).
457 shown (an error will not be raised).
458 (default: ``none``)
458 (default: ``none``)
459
459
460 ``status.relative``
460 ``status.relative``
461 Make paths in :hg:`status` output relative to the current directory.
461 Make paths in :hg:`status` output relative to the current directory.
462 (default: False)
462 (default: False)
463
463
464 ``status.terse``
464 ``status.terse``
465 Default value for the --terse flag, which condenses status output.
465 Default value for the --terse flag, which condenses status output.
466 (default: empty)
466 (default: empty)
467
467
468 ``update.check``
468 ``update.check``
469 Determines what level of checking :hg:`update` will perform before moving
469 Determines what level of checking :hg:`update` will perform before moving
470 to a destination revision. Valid values are ``abort``, ``none``,
470 to a destination revision. Valid values are ``abort``, ``none``,
471 ``linear``, and ``noconflict``. ``abort`` always fails if the working
471 ``linear``, and ``noconflict``. ``abort`` always fails if the working
472 directory has uncommitted changes. ``none`` performs no checking, and may
472 directory has uncommitted changes. ``none`` performs no checking, and may
473 result in a merge with uncommitted changes. ``linear`` allows any update
473 result in a merge with uncommitted changes. ``linear`` allows any update
474 as long as it follows a straight line in the revision history, and may
474 as long as it follows a straight line in the revision history, and may
475 trigger a merge with uncommitted changes. ``noconflict`` will allow any
475 trigger a merge with uncommitted changes. ``noconflict`` will allow any
476 update which would not trigger a merge with uncommitted changes, if any
476 update which would not trigger a merge with uncommitted changes, if any
477 are present.
477 are present.
478 (default: ``linear``)
478 (default: ``linear``)
479
479
480 ``update.requiredest``
480 ``update.requiredest``
481 Require that the user pass a destination when running :hg:`update`.
481 Require that the user pass a destination when running :hg:`update`.
482 For example, :hg:`update .::` will be allowed, but a plain :hg:`update`
482 For example, :hg:`update .::` will be allowed, but a plain :hg:`update`
483 will be disallowed.
483 will be disallowed.
484 (default: False)
484 (default: False)
485
485
486 ``committemplate``
486 ``committemplate``
487 ------------------
487 ------------------
488
488
489 ``changeset``
489 ``changeset``
490 String: configuration in this section is used as the template to
490 String: configuration in this section is used as the template to
491 customize the text shown in the editor when committing.
491 customize the text shown in the editor when committing.
492
492
493 In addition to pre-defined template keywords, commit log specific one
493 In addition to pre-defined template keywords, commit log specific one
494 below can be used for customization:
494 below can be used for customization:
495
495
496 ``extramsg``
496 ``extramsg``
497 String: Extra message (typically 'Leave message empty to abort
497 String: Extra message (typically 'Leave message empty to abort
498 commit.'). This may be changed by some commands or extensions.
498 commit.'). This may be changed by some commands or extensions.
499
499
 500 For example, the template configuration below shows the same text as
 500 For example, the template configuration below shows the same text as
 501 the one shown by default::
 501 the one shown by default::
502
502
503 [committemplate]
503 [committemplate]
504 changeset = {desc}\n\n
504 changeset = {desc}\n\n
505 HG: Enter commit message. Lines beginning with 'HG:' are removed.
505 HG: Enter commit message. Lines beginning with 'HG:' are removed.
506 HG: {extramsg}
506 HG: {extramsg}
507 HG: --
507 HG: --
508 HG: user: {author}\n{ifeq(p2rev, "-1", "",
508 HG: user: {author}\n{ifeq(p2rev, "-1", "",
509 "HG: branch merge\n")
509 "HG: branch merge\n")
510 }HG: branch '{branch}'\n{if(activebookmark,
510 }HG: branch '{branch}'\n{if(activebookmark,
511 "HG: bookmark '{activebookmark}'\n") }{subrepos %
511 "HG: bookmark '{activebookmark}'\n") }{subrepos %
512 "HG: subrepo {subrepo}\n" }{file_adds %
512 "HG: subrepo {subrepo}\n" }{file_adds %
513 "HG: added {file}\n" }{file_mods %
513 "HG: added {file}\n" }{file_mods %
514 "HG: changed {file}\n" }{file_dels %
514 "HG: changed {file}\n" }{file_dels %
515 "HG: removed {file}\n" }{if(files, "",
515 "HG: removed {file}\n" }{if(files, "",
516 "HG: no files changed\n")}
516 "HG: no files changed\n")}
517
517
518 ``diff()``
518 ``diff()``
519 String: show the diff (see :hg:`help templates` for detail)
519 String: show the diff (see :hg:`help templates` for detail)
520
520
521 Sometimes it is helpful to show the diff of the changeset in the editor without
521 Sometimes it is helpful to show the diff of the changeset in the editor without
522 having to prefix 'HG: ' to each line so that highlighting works correctly. For
522 having to prefix 'HG: ' to each line so that highlighting works correctly. For
523 this, Mercurial provides a special string which will ignore everything below
523 this, Mercurial provides a special string which will ignore everything below
524 it::
524 it::
525
525
526 HG: ------------------------ >8 ------------------------
526 HG: ------------------------ >8 ------------------------
527
527
528 For example, the template configuration below will show the diff below the
528 For example, the template configuration below will show the diff below the
529 extra message::
529 extra message::
530
530
531 [committemplate]
531 [committemplate]
532 changeset = {desc}\n\n
532 changeset = {desc}\n\n
533 HG: Enter commit message. Lines beginning with 'HG:' are removed.
533 HG: Enter commit message. Lines beginning with 'HG:' are removed.
534 HG: {extramsg}
534 HG: {extramsg}
535 HG: ------------------------ >8 ------------------------
535 HG: ------------------------ >8 ------------------------
536 HG: Do not touch the line above.
536 HG: Do not touch the line above.
537 HG: Everything below will be removed.
537 HG: Everything below will be removed.
538 {diff()}
538 {diff()}
539
539
540 .. note::
540 .. note::
541
541
542 For some problematic encodings (see :hg:`help win32mbcs` for
542 For some problematic encodings (see :hg:`help win32mbcs` for
543 detail), this customization should be configured carefully, to
543 detail), this customization should be configured carefully, to
544 avoid showing broken characters.
544 avoid showing broken characters.
545
545
546 For example, if a multibyte character ending with backslash (0x5c) is
546 For example, if a multibyte character ending with backslash (0x5c) is
547 followed by the ASCII character 'n' in the customized template,
547 followed by the ASCII character 'n' in the customized template,
548 the sequence of backslash and 'n' is treated as line-feed unexpectedly
548 the sequence of backslash and 'n' is treated as line-feed unexpectedly
549 (and the multibyte character is broken, too).
549 (and the multibyte character is broken, too).
550
550
551 Customized template is used for commands below (``--edit`` may be
551 Customized template is used for commands below (``--edit`` may be
552 required):
552 required):
553
553
554 - :hg:`backout`
554 - :hg:`backout`
555 - :hg:`commit`
555 - :hg:`commit`
556 - :hg:`fetch` (for merge commit only)
556 - :hg:`fetch` (for merge commit only)
557 - :hg:`graft`
557 - :hg:`graft`
558 - :hg:`histedit`
558 - :hg:`histedit`
559 - :hg:`import`
559 - :hg:`import`
560 - :hg:`qfold`, :hg:`qnew` and :hg:`qrefresh`
560 - :hg:`qfold`, :hg:`qnew` and :hg:`qrefresh`
561 - :hg:`rebase`
561 - :hg:`rebase`
562 - :hg:`shelve`
562 - :hg:`shelve`
563 - :hg:`sign`
563 - :hg:`sign`
564 - :hg:`tag`
564 - :hg:`tag`
565 - :hg:`transplant`
565 - :hg:`transplant`
566
566
567 Configuring items below instead of ``changeset`` allows showing
567 Configuring items below instead of ``changeset`` allows showing
568 customized message only for specific actions, or showing different
568 customized message only for specific actions, or showing different
569 messages for each action.
569 messages for each action.
570
570
571 - ``changeset.backout`` for :hg:`backout`
571 - ``changeset.backout`` for :hg:`backout`
572 - ``changeset.commit.amend.merge`` for :hg:`commit --amend` on merges
572 - ``changeset.commit.amend.merge`` for :hg:`commit --amend` on merges
573 - ``changeset.commit.amend.normal`` for :hg:`commit --amend` on other
573 - ``changeset.commit.amend.normal`` for :hg:`commit --amend` on other
574 - ``changeset.commit.normal.merge`` for :hg:`commit` on merges
574 - ``changeset.commit.normal.merge`` for :hg:`commit` on merges
575 - ``changeset.commit.normal.normal`` for :hg:`commit` on other
575 - ``changeset.commit.normal.normal`` for :hg:`commit` on other
 576 - ``changeset.fetch`` for :hg:`fetch` (implying merge commit)
 576 - ``changeset.fetch`` for :hg:`fetch` (implying merge commit)
577 - ``changeset.gpg.sign`` for :hg:`sign`
577 - ``changeset.gpg.sign`` for :hg:`sign`
578 - ``changeset.graft`` for :hg:`graft`
578 - ``changeset.graft`` for :hg:`graft`
579 - ``changeset.histedit.edit`` for ``edit`` of :hg:`histedit`
579 - ``changeset.histedit.edit`` for ``edit`` of :hg:`histedit`
580 - ``changeset.histedit.fold`` for ``fold`` of :hg:`histedit`
580 - ``changeset.histedit.fold`` for ``fold`` of :hg:`histedit`
581 - ``changeset.histedit.mess`` for ``mess`` of :hg:`histedit`
581 - ``changeset.histedit.mess`` for ``mess`` of :hg:`histedit`
582 - ``changeset.histedit.pick`` for ``pick`` of :hg:`histedit`
582 - ``changeset.histedit.pick`` for ``pick`` of :hg:`histedit`
583 - ``changeset.import.bypass`` for :hg:`import --bypass`
583 - ``changeset.import.bypass`` for :hg:`import --bypass`
584 - ``changeset.import.normal.merge`` for :hg:`import` on merges
584 - ``changeset.import.normal.merge`` for :hg:`import` on merges
585 - ``changeset.import.normal.normal`` for :hg:`import` on other
585 - ``changeset.import.normal.normal`` for :hg:`import` on other
586 - ``changeset.mq.qnew`` for :hg:`qnew`
586 - ``changeset.mq.qnew`` for :hg:`qnew`
587 - ``changeset.mq.qfold`` for :hg:`qfold`
587 - ``changeset.mq.qfold`` for :hg:`qfold`
588 - ``changeset.mq.qrefresh`` for :hg:`qrefresh`
588 - ``changeset.mq.qrefresh`` for :hg:`qrefresh`
589 - ``changeset.rebase.collapse`` for :hg:`rebase --collapse`
589 - ``changeset.rebase.collapse`` for :hg:`rebase --collapse`
590 - ``changeset.rebase.merge`` for :hg:`rebase` on merges
590 - ``changeset.rebase.merge`` for :hg:`rebase` on merges
591 - ``changeset.rebase.normal`` for :hg:`rebase` on other
591 - ``changeset.rebase.normal`` for :hg:`rebase` on other
592 - ``changeset.shelve.shelve`` for :hg:`shelve`
592 - ``changeset.shelve.shelve`` for :hg:`shelve`
593 - ``changeset.tag.add`` for :hg:`tag` without ``--remove``
593 - ``changeset.tag.add`` for :hg:`tag` without ``--remove``
594 - ``changeset.tag.remove`` for :hg:`tag --remove`
594 - ``changeset.tag.remove`` for :hg:`tag --remove`
595 - ``changeset.transplant.merge`` for :hg:`transplant` on merges
595 - ``changeset.transplant.merge`` for :hg:`transplant` on merges
596 - ``changeset.transplant.normal`` for :hg:`transplant` on other
596 - ``changeset.transplant.normal`` for :hg:`transplant` on other
597
597
598 These dot-separated lists of names are treated as hierarchical ones.
598 These dot-separated lists of names are treated as hierarchical ones.
599 For example, ``changeset.tag.remove`` customizes the commit message
599 For example, ``changeset.tag.remove`` customizes the commit message
600 only for :hg:`tag --remove`, but ``changeset.tag`` customizes the
600 only for :hg:`tag --remove`, but ``changeset.tag`` customizes the
601 commit message for :hg:`tag` regardless of ``--remove`` option.
601 commit message for :hg:`tag` regardless of ``--remove`` option.
602
602
603 When the external editor is invoked for a commit, the corresponding
603 When the external editor is invoked for a commit, the corresponding
604 dot-separated list of names without the ``changeset.`` prefix
604 dot-separated list of names without the ``changeset.`` prefix
605 (e.g. ``commit.normal.normal``) is in the ``HGEDITFORM`` environment
605 (e.g. ``commit.normal.normal``) is in the ``HGEDITFORM`` environment
606 variable.
606 variable.
607
607
608 In this section, items other than ``changeset`` can be referred from
608 In this section, items other than ``changeset`` can be referred from
609 others. For example, the configuration to list committed files up
609 others. For example, the configuration to list committed files up
610 below can be referred as ``{listupfiles}``::
610 below can be referred as ``{listupfiles}``::
611
611
612 [committemplate]
612 [committemplate]
613 listupfiles = {file_adds %
613 listupfiles = {file_adds %
614 "HG: added {file}\n" }{file_mods %
614 "HG: added {file}\n" }{file_mods %
615 "HG: changed {file}\n" }{file_dels %
615 "HG: changed {file}\n" }{file_dels %
616 "HG: removed {file}\n" }{if(files, "",
616 "HG: removed {file}\n" }{if(files, "",
617 "HG: no files changed\n")}
617 "HG: no files changed\n")}
618
618
619 ``decode/encode``
619 ``decode/encode``
620 -----------------
620 -----------------
621
621
622 Filters for transforming files on checkout/checkin. This would
622 Filters for transforming files on checkout/checkin. This would
623 typically be used for newline processing or other
623 typically be used for newline processing or other
624 localization/canonicalization of files.
624 localization/canonicalization of files.
625
625
626 Filters consist of a filter pattern followed by a filter command.
626 Filters consist of a filter pattern followed by a filter command.
627 Filter patterns are globs by default, rooted at the repository root.
627 Filter patterns are globs by default, rooted at the repository root.
628 For example, to match any file ending in ``.txt`` in the root
628 For example, to match any file ending in ``.txt`` in the root
629 directory only, use the pattern ``*.txt``. To match any file ending
629 directory only, use the pattern ``*.txt``. To match any file ending
630 in ``.c`` anywhere in the repository, use the pattern ``**.c``.
630 in ``.c`` anywhere in the repository, use the pattern ``**.c``.
631 For each file only the first matching filter applies.
631 For each file only the first matching filter applies.
632
632
633 The filter command can start with a specifier, either ``pipe:`` or
633 The filter command can start with a specifier, either ``pipe:`` or
634 ``tempfile:``. If no specifier is given, ``pipe:`` is used by default.
634 ``tempfile:``. If no specifier is given, ``pipe:`` is used by default.
635
635
636 A ``pipe:`` command must accept data on stdin and return the transformed
636 A ``pipe:`` command must accept data on stdin and return the transformed
637 data on stdout.
637 data on stdout.
638
638
639 Pipe example::
639 Pipe example::
640
640
641 [encode]
641 [encode]
642 # uncompress gzip files on checkin to improve delta compression
642 # uncompress gzip files on checkin to improve delta compression
643 # note: not necessarily a good idea, just an example
643 # note: not necessarily a good idea, just an example
644 *.gz = pipe: gunzip
644 *.gz = pipe: gunzip
645
645
646 [decode]
646 [decode]
647 # recompress gzip files when writing them to the working dir (we
647 # recompress gzip files when writing them to the working dir (we
648 # can safely omit "pipe:", because it's the default)
648 # can safely omit "pipe:", because it's the default)
649 *.gz = gzip
649 *.gz = gzip
650
650
651 A ``tempfile:`` command is a template. The string ``INFILE`` is replaced
651 A ``tempfile:`` command is a template. The string ``INFILE`` is replaced
652 with the name of a temporary file that contains the data to be
652 with the name of a temporary file that contains the data to be
653 filtered by the command. The string ``OUTFILE`` is replaced with the name
653 filtered by the command. The string ``OUTFILE`` is replaced with the name
654 of an empty temporary file, where the filtered data must be written by
654 of an empty temporary file, where the filtered data must be written by
655 the command.
655 the command.
656
656
657 .. container:: windows
657 .. container:: windows
658
658
659 .. note::
659 .. note::
660
660
661 The tempfile mechanism is recommended for Windows systems,
661 The tempfile mechanism is recommended for Windows systems,
662 where the standard shell I/O redirection operators often have
662 where the standard shell I/O redirection operators often have
663 strange effects and may corrupt the contents of your files.
663 strange effects and may corrupt the contents of your files.
664
664
665 This filter mechanism is used internally by the ``eol`` extension to
665 This filter mechanism is used internally by the ``eol`` extension to
666 translate line ending characters between Windows (CRLF) and Unix (LF)
666 translate line ending characters between Windows (CRLF) and Unix (LF)
667 format. We suggest you use the ``eol`` extension for convenience.
667 format. We suggest you use the ``eol`` extension for convenience.
668
668
669
669
670 ``defaults``
670 ``defaults``
671 ------------
671 ------------
672
672
673 (defaults are deprecated. Don't use them. Use aliases instead.)
673 (defaults are deprecated. Don't use them. Use aliases instead.)
674
674
675 Use the ``[defaults]`` section to define command defaults, i.e. the
675 Use the ``[defaults]`` section to define command defaults, i.e. the
676 default options/arguments to pass to the specified commands.
676 default options/arguments to pass to the specified commands.
677
677
678 The following example makes :hg:`log` run in verbose mode, and
678 The following example makes :hg:`log` run in verbose mode, and
679 :hg:`status` show only the modified files, by default::
679 :hg:`status` show only the modified files, by default::
680
680
681 [defaults]
681 [defaults]
682 log = -v
682 log = -v
683 status = -m
683 status = -m
684
684
685 The actual commands, instead of their aliases, must be used when
685 The actual commands, instead of their aliases, must be used when
686 defining command defaults. The command defaults will also be applied
686 defining command defaults. The command defaults will also be applied
687 to the aliases of the commands defined.
687 to the aliases of the commands defined.
688
688
689
689
690 ``diff``
690 ``diff``
691 --------
691 --------
692
692
693 Settings used when displaying diffs. Everything except for ``unified``
693 Settings used when displaying diffs. Everything except for ``unified``
694 is a Boolean and defaults to False. See :hg:`help config.annotate`
694 is a Boolean and defaults to False. See :hg:`help config.annotate`
695 for related options for the annotate command.
695 for related options for the annotate command.
696
696
697 ``git``
697 ``git``
698 Use git extended diff format.
698 Use git extended diff format.
699
699
700 ``nobinary``
700 ``nobinary``
701 Omit git binary patches.
701 Omit git binary patches.
702
702
703 ``nodates``
703 ``nodates``
704 Don't include dates in diff headers.
704 Don't include dates in diff headers.
705
705
706 ``noprefix``
706 ``noprefix``
707 Omit 'a/' and 'b/' prefixes from filenames. Ignored in plain mode.
707 Omit 'a/' and 'b/' prefixes from filenames. Ignored in plain mode.
708
708
709 ``showfunc``
709 ``showfunc``
710 Show which function each change is in.
710 Show which function each change is in.
711
711
712 ``ignorews``
712 ``ignorews``
713 Ignore white space when comparing lines.
713 Ignore white space when comparing lines.
714
714
715 ``ignorewsamount``
715 ``ignorewsamount``
716 Ignore changes in the amount of white space.
716 Ignore changes in the amount of white space.
717
717
718 ``ignoreblanklines``
718 ``ignoreblanklines``
719 Ignore changes whose lines are all blank.
719 Ignore changes whose lines are all blank.
720
720
721 ``unified``
721 ``unified``
722 Number of lines of context to show.
722 Number of lines of context to show.
723
723
724 ``word-diff``
724 ``word-diff``
725 Highlight changed words.
725 Highlight changed words.
726
726
727 ``email``
727 ``email``
728 ---------
728 ---------
729
729
730 Settings for extensions that send email messages.
730 Settings for extensions that send email messages.
731
731
732 ``from``
732 ``from``
733 Optional. Email address to use in "From" header and SMTP envelope
733 Optional. Email address to use in "From" header and SMTP envelope
734 of outgoing messages.
734 of outgoing messages.
735
735
736 ``to``
736 ``to``
737 Optional. Comma-separated list of recipients' email addresses.
737 Optional. Comma-separated list of recipients' email addresses.
738
738
739 ``cc``
739 ``cc``
740 Optional. Comma-separated list of carbon copy recipients'
740 Optional. Comma-separated list of carbon copy recipients'
741 email addresses.
741 email addresses.
742
742
743 ``bcc``
743 ``bcc``
744 Optional. Comma-separated list of blind carbon copy recipients'
744 Optional. Comma-separated list of blind carbon copy recipients'
745 email addresses.
745 email addresses.
746
746
747 ``method``
747 ``method``
748 Optional. Method to use to send email messages. If value is ``smtp``
748 Optional. Method to use to send email messages. If value is ``smtp``
749 (default), use SMTP (see the ``[smtp]`` section for configuration).
749 (default), use SMTP (see the ``[smtp]`` section for configuration).
750 Otherwise, use as name of program to run that acts like sendmail
750 Otherwise, use as name of program to run that acts like sendmail
751 (takes ``-f`` option for sender, list of recipients on command line,
751 (takes ``-f`` option for sender, list of recipients on command line,
752 message on stdin). Normally, setting this to ``sendmail`` or
752 message on stdin). Normally, setting this to ``sendmail`` or
753 ``/usr/sbin/sendmail`` is enough to use sendmail to send messages.
753 ``/usr/sbin/sendmail`` is enough to use sendmail to send messages.
754
754
755 ``charsets``
755 ``charsets``
756 Optional. Comma-separated list of character sets considered
756 Optional. Comma-separated list of character sets considered
757 convenient for recipients. Addresses, headers, and parts not
757 convenient for recipients. Addresses, headers, and parts not
758 containing patches of outgoing messages will be encoded in the
758 containing patches of outgoing messages will be encoded in the
759 first character set to which conversion from local encoding
759 first character set to which conversion from local encoding
760 (``$HGENCODING``, ``ui.fallbackencoding``) succeeds. If correct
760 (``$HGENCODING``, ``ui.fallbackencoding``) succeeds. If correct
761 conversion fails, the text in question is sent as is.
761 conversion fails, the text in question is sent as is.
762 (default: '')
762 (default: '')
763
763
764 Order of outgoing email character sets:
764 Order of outgoing email character sets:
765
765
766 1. ``us-ascii``: always first, regardless of settings
766 1. ``us-ascii``: always first, regardless of settings
767 2. ``email.charsets``: in order given by user
767 2. ``email.charsets``: in order given by user
768 3. ``ui.fallbackencoding``: if not in email.charsets
768 3. ``ui.fallbackencoding``: if not in email.charsets
769 4. ``$HGENCODING``: if not in email.charsets
769 4. ``$HGENCODING``: if not in email.charsets
770 5. ``utf-8``: always last, regardless of settings
770 5. ``utf-8``: always last, regardless of settings
771
771
772 Email example::
772 Email example::
773
773
774 [email]
774 [email]
775 from = Joseph User <joe.user@example.com>
775 from = Joseph User <joe.user@example.com>
776 method = /usr/sbin/sendmail
776 method = /usr/sbin/sendmail
777 # charsets for western Europeans
777 # charsets for western Europeans
778 # us-ascii, utf-8 omitted, as they are tried first and last
778 # us-ascii, utf-8 omitted, as they are tried first and last
779 charsets = iso-8859-1, iso-8859-15, windows-1252
779 charsets = iso-8859-1, iso-8859-15, windows-1252
780
780
781
781
782 ``extensions``
782 ``extensions``
783 --------------
783 --------------
784
784
785 Mercurial has an extension mechanism for adding new features. To
785 Mercurial has an extension mechanism for adding new features. To
786 enable an extension, create an entry for it in this section.
786 enable an extension, create an entry for it in this section.
787
787
788 If you know that the extension is already in Python's search path,
788 If you know that the extension is already in Python's search path,
789 you can give the name of the module, followed by ``=``, with nothing
789 you can give the name of the module, followed by ``=``, with nothing
790 after the ``=``.
790 after the ``=``.
791
791
792 Otherwise, give a name that you choose, followed by ``=``, followed by
792 Otherwise, give a name that you choose, followed by ``=``, followed by
793 the path to the ``.py`` file (including the file name extension) that
793 the path to the ``.py`` file (including the file name extension) that
794 defines the extension.
794 defines the extension.
795
795
796 To explicitly disable an extension that is enabled in an hgrc of
796 To explicitly disable an extension that is enabled in an hgrc of
797 broader scope, prepend its path with ``!``, as in ``foo = !/ext/path``
797 broader scope, prepend its path with ``!``, as in ``foo = !/ext/path``
798 or ``foo = !`` when path is not supplied.
798 or ``foo = !`` when path is not supplied.
799
799
800 Example for ``~/.hgrc``::
800 Example for ``~/.hgrc``::
801
801
802 [extensions]
802 [extensions]
803 # (the churn extension will get loaded from Mercurial's path)
803 # (the churn extension will get loaded from Mercurial's path)
804 churn =
804 churn =
805 # (this extension will get loaded from the file specified)
805 # (this extension will get loaded from the file specified)
806 myfeature = ~/.hgext/myfeature.py
806 myfeature = ~/.hgext/myfeature.py
807
807
808
808
809 ``format``
809 ``format``
810 ----------
810 ----------
811
811
812 Configuration that controls the repository format. Newer format options are more
812 Configuration that controls the repository format. Newer format options are more
813 powerful but incompatible with some older versions of Mercurial. Format options
813 powerful but incompatible with some older versions of Mercurial. Format options
814 are considered at repository initialization only. You need to make a new clone
814 are considered at repository initialization only. You need to make a new clone
815 for config change to be taken into account.
815 for config change to be taken into account.
816
816
817 For more details about repository format and version compatibility, see
817 For more details about repository format and version compatibility, see
818 https://www.mercurial-scm.org/wiki/MissingRequirement
818 https://www.mercurial-scm.org/wiki/MissingRequirement
819
819
820 ``usegeneraldelta``
820 ``usegeneraldelta``
821 Enable or disable the "generaldelta" repository format which improves
821 Enable or disable the "generaldelta" repository format which improves
822 repository compression by allowing "revlog" to store delta against arbitrary
822 repository compression by allowing "revlog" to store delta against arbitrary
823 revision instead of the previous stored one. This provides significant
823 revision instead of the previous stored one. This provides significant
824 improvement for repositories with branches.
824 improvement for repositories with branches.
825
825
826 Repositories with this on-disk format require Mercurial version 1.9.
826 Repositories with this on-disk format require Mercurial version 1.9.
827
827
828 Enabled by default.
828 Enabled by default.
829
829
830 ``dotencode``
830 ``dotencode``
831 Enable or disable the "dotencode" repository format which enhances
831 Enable or disable the "dotencode" repository format which enhances
832 the "fncache" repository format (which has to be enabled to use
832 the "fncache" repository format (which has to be enabled to use
833 dotencode) to avoid issues with filenames starting with ._ on
833 dotencode) to avoid issues with filenames starting with ._ on
834 Mac OS X and spaces on Windows.
834 Mac OS X and spaces on Windows.
835
835
836 Repositories with this on-disk format require Mercurial version 1.7.
836 Repositories with this on-disk format require Mercurial version 1.7.
837
837
838 Enabled by default.
838 Enabled by default.
839
839
840 ``usefncache``
840 ``usefncache``
841 Enable or disable the "fncache" repository format which enhances
841 Enable or disable the "fncache" repository format which enhances
842 the "store" repository format (which has to be enabled to use
842 the "store" repository format (which has to be enabled to use
843 fncache) to allow longer filenames and avoids using Windows
843 fncache) to allow longer filenames and avoids using Windows
844 reserved names, e.g. "nul".
844 reserved names, e.g. "nul".
845
845
846 Repositories with this on-disk format require Mercurial version 1.1.
846 Repositories with this on-disk format require Mercurial version 1.1.
847
847
848 Enabled by default.
848 Enabled by default.
849
849
850 ``usestore``
850 ``usestore``
851 Enable or disable the "store" repository format which improves
851 Enable or disable the "store" repository format which improves
852 compatibility with systems that fold case or otherwise mangle
852 compatibility with systems that fold case or otherwise mangle
853 filenames. Disabling this option will allow you to store longer filenames
853 filenames. Disabling this option will allow you to store longer filenames
854 in some situations at the expense of compatibility.
854 in some situations at the expense of compatibility.
855
855
856 Repositories with this on-disk format require Mercurial version 0.9.4.
856 Repositories with this on-disk format require Mercurial version 0.9.4.
857
857
858 Enabled by default.
858 Enabled by default.
859
859
860 ``sparse-revlog``
860 ``sparse-revlog``
861 Enable or disable the ``sparse-revlog`` delta strategy. This format improves
861 Enable or disable the ``sparse-revlog`` delta strategy. This format improves
862 delta re-use inside revlog. For very branchy repositories, it results in a
862 delta re-use inside revlog. For very branchy repositories, it results in a
863 smaller store. For repositories with many revisions, it also helps
863 smaller store. For repositories with many revisions, it also helps
864 performance (by using shortened delta chains.)
864 performance (by using shortened delta chains.)
865
865
866 Repositories with this on-disk format require Mercurial version 4.7.
866 Repositories with this on-disk format require Mercurial version 4.7.
867
867
868 Enabled by default.
868 Enabled by default.
869
869
870 ``graph``
870 ``graph``
871 ---------
871 ---------
872
872
873 Web graph view configuration. This section lets you change graph
873 Web graph view configuration. This section lets you change graph
874 element display properties by branch, for instance to make the
874 element display properties by branch, for instance to make the
875 ``default`` branch stand out.
875 ``default`` branch stand out.
876
876
877 Each line has the following format::
877 Each line has the following format::
878
878
879 <branch>.<argument> = <value>
879 <branch>.<argument> = <value>
880
880
881 where ``<branch>`` is the name of the branch being
881 where ``<branch>`` is the name of the branch being
882 customized. Example::
882 customized. Example::
883
883
884 [graph]
884 [graph]
885 # 2px width
885 # 2px width
886 default.width = 2
886 default.width = 2
887 # red color
887 # red color
888 default.color = FF0000
888 default.color = FF0000
889
889
890 Supported arguments:
890 Supported arguments:
891
891
892 ``width``
892 ``width``
893 Set branch edges width in pixels.
893 Set branch edges width in pixels.
894
894
895 ``color``
895 ``color``
896 Set branch edges color in hexadecimal RGB notation.
896 Set branch edges color in hexadecimal RGB notation.
897
897
898 ``hooks``
898 ``hooks``
899 ---------
899 ---------
900
900
901 Commands or Python functions that get automatically executed by
901 Commands or Python functions that get automatically executed by
902 various actions such as starting or finishing a commit. Multiple
902 various actions such as starting or finishing a commit. Multiple
903 hooks can be run for the same action by appending a suffix to the
903 hooks can be run for the same action by appending a suffix to the
904 action. Overriding a site-wide hook can be done by changing its
904 action. Overriding a site-wide hook can be done by changing its
905 value or setting it to an empty string. Hooks can be prioritized
905 value or setting it to an empty string. Hooks can be prioritized
906 by adding a prefix of ``priority.`` to the hook name on a new line
906 by adding a prefix of ``priority.`` to the hook name on a new line
907 and setting the priority. The default priority is 0.
907 and setting the priority. The default priority is 0.
908
908
909 Example ``.hg/hgrc``::
909 Example ``.hg/hgrc``::
910
910
911 [hooks]
911 [hooks]
912 # update working directory after adding changesets
912 # update working directory after adding changesets
913 changegroup.update = hg update
913 changegroup.update = hg update
914 # do not use the site-wide hook
914 # do not use the site-wide hook
915 incoming =
915 incoming =
916 incoming.email = /my/email/hook
916 incoming.email = /my/email/hook
917 incoming.autobuild = /my/build/hook
917 incoming.autobuild = /my/build/hook
918 # force autobuild hook to run before other incoming hooks
918 # force autobuild hook to run before other incoming hooks
919 priority.incoming.autobuild = 1
919 priority.incoming.autobuild = 1
920
920
921 Most hooks are run with environment variables set that give useful
921 Most hooks are run with environment variables set that give useful
922 additional information. For each hook below, the environment variables
922 additional information. For each hook below, the environment variables
923 it is passed are listed with names in the form ``$HG_foo``. The
923 it is passed are listed with names in the form ``$HG_foo``. The
924 ``$HG_HOOKTYPE`` and ``$HG_HOOKNAME`` variables are set for all hooks.
924 ``$HG_HOOKTYPE`` and ``$HG_HOOKNAME`` variables are set for all hooks.
925 They contain the type of hook which triggered the run and the full name
925 They contain the type of hook which triggered the run and the full name
926 of the hook in the config, respectively. In the example above, this will
926 of the hook in the config, respectively. In the example above, this will
927 be ``$HG_HOOKTYPE=incoming`` and ``$HG_HOOKNAME=incoming.email``.
927 be ``$HG_HOOKTYPE=incoming`` and ``$HG_HOOKNAME=incoming.email``.
928
928
929 .. container:: windows
929 .. container:: windows
930
930
931 Some basic Unix syntax can be enabled for portability, including ``$VAR``
931 Some basic Unix syntax can be enabled for portability, including ``$VAR``
932 and ``${VAR}`` style variables. A ``~`` followed by ``\`` or ``/`` will
932 and ``${VAR}`` style variables. A ``~`` followed by ``\`` or ``/`` will
933 be expanded to ``%USERPROFILE%`` to simulate a subset of tilde expansion
933 be expanded to ``%USERPROFILE%`` to simulate a subset of tilde expansion
934 on Unix. To use a literal ``$`` or ``~``, it must be escaped with a back
934 on Unix. To use a literal ``$`` or ``~``, it must be escaped with a back
935 slash or inside of a strong quote. Strong quotes will be replaced by
935 slash or inside of a strong quote. Strong quotes will be replaced by
936 double quotes after processing.
936 double quotes after processing.
937
937
938 This feature is enabled by adding a prefix of ``tonative.`` to the hook
938 This feature is enabled by adding a prefix of ``tonative.`` to the hook
939 name on a new line, and setting it to ``True``. For example::
939 name on a new line, and setting it to ``True``. For example::
940
940
941 [hooks]
941 [hooks]
942 incoming.autobuild = /my/build/hook
942 incoming.autobuild = /my/build/hook
943 # enable translation to cmd.exe syntax for autobuild hook
943 # enable translation to cmd.exe syntax for autobuild hook
944 tonative.incoming.autobuild = True
944 tonative.incoming.autobuild = True
945
945
946 ``changegroup``
946 ``changegroup``
947 Run after a changegroup has been added via push, pull or unbundle. The ID of
947 Run after a changegroup has been added via push, pull or unbundle. The ID of
948 the first new changeset is in ``$HG_NODE`` and last is in ``$HG_NODE_LAST``.
948 the first new changeset is in ``$HG_NODE`` and last is in ``$HG_NODE_LAST``.
949 The URL from which changes came is in ``$HG_URL``.
949 The URL from which changes came is in ``$HG_URL``.
950
950
951 ``commit``
951 ``commit``
952 Run after a changeset has been created in the local repository. The ID
952 Run after a changeset has been created in the local repository. The ID
953 of the newly created changeset is in ``$HG_NODE``. Parent changeset
953 of the newly created changeset is in ``$HG_NODE``. Parent changeset
954 IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
954 IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
955
955
956 ``incoming``
956 ``incoming``
957 Run after a changeset has been pulled, pushed, or unbundled into
957 Run after a changeset has been pulled, pushed, or unbundled into
958 the local repository. The ID of the newly arrived changeset is in
958 the local repository. The ID of the newly arrived changeset is in
959 ``$HG_NODE``. The URL that was source of the changes is in ``$HG_URL``.
959 ``$HG_NODE``. The URL that was source of the changes is in ``$HG_URL``.
960
960
961 ``outgoing``
961 ``outgoing``
962 Run after sending changes from the local repository to another. The ID of
962 Run after sending changes from the local repository to another. The ID of
963 first changeset sent is in ``$HG_NODE``. The source of operation is in
963 first changeset sent is in ``$HG_NODE``. The source of operation is in
964 ``$HG_SOURCE``. Also see :hg:`help config.hooks.preoutgoing`.
964 ``$HG_SOURCE``. Also see :hg:`help config.hooks.preoutgoing`.
965
965
966 ``post-<command>``
966 ``post-<command>``
967 Run after successful invocations of the associated command. The
967 Run after successful invocations of the associated command. The
968 contents of the command line are passed as ``$HG_ARGS`` and the result
968 contents of the command line are passed as ``$HG_ARGS`` and the result
969 code in ``$HG_RESULT``. Parsed command line arguments are passed as
969 code in ``$HG_RESULT``. Parsed command line arguments are passed as
970 ``$HG_PATS`` and ``$HG_OPTS``. These contain string representations of
970 ``$HG_PATS`` and ``$HG_OPTS``. These contain string representations of
971 the Python data internally passed to <command>. ``$HG_OPTS`` is a
971 the Python data internally passed to <command>. ``$HG_OPTS`` is a
972 dictionary of options (with unspecified options set to their defaults).
972 dictionary of options (with unspecified options set to their defaults).
973 ``$HG_PATS`` is a list of arguments. Hook failure is ignored.
973 ``$HG_PATS`` is a list of arguments. Hook failure is ignored.
974
974
975 ``fail-<command>``
975 ``fail-<command>``
976 Run after a failed invocation of an associated command. The contents
976 Run after a failed invocation of an associated command. The contents
977 of the command line are passed as ``$HG_ARGS``. Parsed command line
977 of the command line are passed as ``$HG_ARGS``. Parsed command line
978 arguments are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain
978 arguments are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain
979 string representations of the Python data internally passed to
979 string representations of the Python data internally passed to
980 <command>. ``$HG_OPTS`` is a dictionary of options (with unspecified
980 <command>. ``$HG_OPTS`` is a dictionary of options (with unspecified
981 options set to their defaults). ``$HG_PATS`` is a list of arguments.
981 options set to their defaults). ``$HG_PATS`` is a list of arguments.
982 Hook failure is ignored.
982 Hook failure is ignored.
983
983
984 ``pre-<command>``
984 ``pre-<command>``
985 Run before executing the associated command. The contents of the
985 Run before executing the associated command. The contents of the
986 command line are passed as ``$HG_ARGS``. Parsed command line arguments
986 command line are passed as ``$HG_ARGS``. Parsed command line arguments
987 are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain string
987 are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain string
988 representations of the data internally passed to <command>. ``$HG_OPTS``
988 representations of the data internally passed to <command>. ``$HG_OPTS``
989 is a dictionary of options (with unspecified options set to their
989 is a dictionary of options (with unspecified options set to their
990 defaults). ``$HG_PATS`` is a list of arguments. If the hook returns
990 defaults). ``$HG_PATS`` is a list of arguments. If the hook returns
991 failure, the command doesn't execute and Mercurial returns the failure
991 failure, the command doesn't execute and Mercurial returns the failure
992 code.
992 code.
993
993
994 ``prechangegroup``
994 ``prechangegroup``
995 Run before a changegroup is added via push, pull or unbundle. Exit
995 Run before a changegroup is added via push, pull or unbundle. Exit
996 status 0 allows the changegroup to proceed. A non-zero status will
996 status 0 allows the changegroup to proceed. A non-zero status will
997 cause the push, pull or unbundle to fail. The URL from which changes
997 cause the push, pull or unbundle to fail. The URL from which changes
998 will come is in ``$HG_URL``.
998 will come is in ``$HG_URL``.
999
999
1000 ``precommit``
1000 ``precommit``
1001 Run before starting a local commit. Exit status 0 allows the
1001 Run before starting a local commit. Exit status 0 allows the
1002 commit to proceed. A non-zero status will cause the commit to fail.
1002 commit to proceed. A non-zero status will cause the commit to fail.
1003 Parent changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1003 Parent changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1004
1004
1005 ``prelistkeys``
1005 ``prelistkeys``
1006 Run before listing pushkeys (like bookmarks) in the
1006 Run before listing pushkeys (like bookmarks) in the
1007 repository. A non-zero status will cause failure. The key namespace is
1007 repository. A non-zero status will cause failure. The key namespace is
1008 in ``$HG_NAMESPACE``.
1008 in ``$HG_NAMESPACE``.
1009
1009
1010 ``preoutgoing``
1010 ``preoutgoing``
1011 Run before collecting changes to send from the local repository to
1011 Run before collecting changes to send from the local repository to
1012 another. A non-zero status will cause failure. This lets you prevent
1012 another. A non-zero status will cause failure. This lets you prevent
1013 pull over HTTP or SSH. It can also prevent propagating commits (via
1013 pull over HTTP or SSH. It can also prevent propagating commits (via
1014 local pull, push (outbound) or bundle commands), but not completely,
1014 local pull, push (outbound) or bundle commands), but not completely,
1015 since you can just copy files instead. The source of operation is in
1015 since you can just copy files instead. The source of operation is in
1016 ``$HG_SOURCE``. If "serve", the operation is happening on behalf of a remote
1016 ``$HG_SOURCE``. If "serve", the operation is happening on behalf of a remote
1017 SSH or HTTP repository. If "push", "pull" or "bundle", the operation
1017 SSH or HTTP repository. If "push", "pull" or "bundle", the operation
1018 is happening on behalf of a repository on same system.
1018 is happening on behalf of a repository on same system.
1019
1019
1020 ``prepushkey``
1020 ``prepushkey``
1021 Run before a pushkey (like a bookmark) is added to the
1021 Run before a pushkey (like a bookmark) is added to the
1022 repository. A non-zero status will cause the key to be rejected. The
1022 repository. A non-zero status will cause the key to be rejected. The
1023 key namespace is in ``$HG_NAMESPACE``, the key is in ``$HG_KEY``,
1023 key namespace is in ``$HG_NAMESPACE``, the key is in ``$HG_KEY``,
1024 the old value (if any) is in ``$HG_OLD``, and the new value is in
1024 the old value (if any) is in ``$HG_OLD``, and the new value is in
1025 ``$HG_NEW``.
1025 ``$HG_NEW``.
1026
1026
1027 ``pretag``
1027 ``pretag``
1028 Run before creating a tag. Exit status 0 allows the tag to be
1028 Run before creating a tag. Exit status 0 allows the tag to be
1029 created. A non-zero status will cause the tag to fail. The ID of the
1029 created. A non-zero status will cause the tag to fail. The ID of the
1030 changeset to tag is in ``$HG_NODE``. The name of tag is in ``$HG_TAG``. The
1030 changeset to tag is in ``$HG_NODE``. The name of tag is in ``$HG_TAG``. The
1031 tag is local if ``$HG_LOCAL=1``, or in the repository if ``$HG_LOCAL=0``.
1031 tag is local if ``$HG_LOCAL=1``, or in the repository if ``$HG_LOCAL=0``.
1032
1032
1033 ``pretxnopen``
1033 ``pretxnopen``
1034 Run before any new repository transaction is opened. The reason for the
1034 Run before any new repository transaction is opened. The reason for the
1035 transaction will be in ``$HG_TXNNAME``, and a unique identifier for the
1035 transaction will be in ``$HG_TXNNAME``, and a unique identifier for the
1036 transaction will be in ``$HG_TXNID``. A non-zero status will prevent the
1036 transaction will be in ``$HG_TXNID``. A non-zero status will prevent the
1037 transaction from being opened.
1037 transaction from being opened.
1038
1038
1039 ``pretxnclose``
1039 ``pretxnclose``
1040 Run right before the transaction is actually finalized. Any repository change
1040 Run right before the transaction is actually finalized. Any repository change
1041 will be visible to the hook program. This lets you validate the transaction
1041 will be visible to the hook program. This lets you validate the transaction
1042 content or change it. Exit status 0 allows the commit to proceed. A non-zero
1042 content or change it. Exit status 0 allows the commit to proceed. A non-zero
1043 status will cause the transaction to be rolled back. The reason for the
1043 status will cause the transaction to be rolled back. The reason for the
1044 transaction opening will be in ``$HG_TXNNAME``, and a unique identifier for
1044 transaction opening will be in ``$HG_TXNNAME``, and a unique identifier for
1045 the transaction will be in ``HG_TXNID``. The rest of the available data will
1045 the transaction will be in ``HG_TXNID``. The rest of the available data will
1046 vary according to the transaction type. New changesets will add ``$HG_NODE``
1046 vary according to the transaction type. New changesets will add ``$HG_NODE``
1047 (the ID of the first added changeset), ``$HG_NODE_LAST`` (the ID of the last
1047 (the ID of the first added changeset), ``$HG_NODE_LAST`` (the ID of the last
1048 added changeset), ``$HG_URL`` and ``$HG_SOURCE`` variables. Bookmark and
1048 added changeset), ``$HG_URL`` and ``$HG_SOURCE`` variables. Bookmark and
1049 phase changes will set ``HG_BOOKMARK_MOVED`` and ``HG_PHASES_MOVED`` to ``1``
1049 phase changes will set ``HG_BOOKMARK_MOVED`` and ``HG_PHASES_MOVED`` to ``1``
1050 respectively, etc.
1050 respectively, etc.
1051
1051
1052 ``pretxnclose-bookmark``
1052 ``pretxnclose-bookmark``
1053 Run right before a bookmark change is actually finalized. Any repository
1053 Run right before a bookmark change is actually finalized. Any repository
1054 change will be visible to the hook program. This lets you validate the
1054 change will be visible to the hook program. This lets you validate the
1055 transaction content or change it. Exit status 0 allows the commit to
1055 transaction content or change it. Exit status 0 allows the commit to
1056 proceed. A non-zero status will cause the transaction to be rolled back.
1056 proceed. A non-zero status will cause the transaction to be rolled back.
1057 The name of the bookmark will be available in ``$HG_BOOKMARK``, the new
1057 The name of the bookmark will be available in ``$HG_BOOKMARK``, the new
1058 bookmark location will be available in ``$HG_NODE`` while the previous
1058 bookmark location will be available in ``$HG_NODE`` while the previous
1059 location will be available in ``$HG_OLDNODE``. In case of a bookmark
1059 location will be available in ``$HG_OLDNODE``. In case of a bookmark
1060 creation ``$HG_OLDNODE`` will be empty. In case of deletion ``$HG_NODE``
1060 creation ``$HG_OLDNODE`` will be empty. In case of deletion ``$HG_NODE``
1061 will be empty.
1061 will be empty.
1062 In addition, the reason for the transaction opening will be in
1062 In addition, the reason for the transaction opening will be in
1063 ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
1063 ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
1064 ``HG_TXNID``.
1064 ``HG_TXNID``.
1065
1065
1066 ``pretxnclose-phase``
1066 ``pretxnclose-phase``
1067 Run right before a phase change is actually finalized. Any repository change
1067 Run right before a phase change is actually finalized. Any repository change
1068 will be visible to the hook program. This lets you validate the transaction
1068 will be visible to the hook program. This lets you validate the transaction
1069 content or change it. Exit status 0 allows the commit to proceed. A non-zero
1069 content or change it. Exit status 0 allows the commit to proceed. A non-zero
1070 status will cause the transaction to be rolled back. The hook is called
1070 status will cause the transaction to be rolled back. The hook is called
1071 multiple times, once for each revision affected by a phase change.
1071 multiple times, once for each revision affected by a phase change.
1072 The affected node is available in ``$HG_NODE``, the new phase in ``$HG_PHASE``,
1072 The affected node is available in ``$HG_NODE``, the new phase in ``$HG_PHASE``,
1073 and the previous phase in ``$HG_OLDPHASE``. For a new node, ``$HG_OLDPHASE``
1073 and the previous phase in ``$HG_OLDPHASE``. For a new node, ``$HG_OLDPHASE``
1074 will be empty. In addition, the reason for the transaction opening will be in
1074 will be empty. In addition, the reason for the transaction opening will be in
1075 ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
1075 ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
1076 ``HG_TXNID``. The hook is also run for newly added revisions. In this case
1076 ``HG_TXNID``. The hook is also run for newly added revisions. In this case
1077 the ``$HG_OLDPHASE`` entry will be empty.
1077 the ``$HG_OLDPHASE`` entry will be empty.
1078
1078
1079 ``txnclose``
1079 ``txnclose``
1080 Run after any repository transaction has been committed. At this
1080 Run after any repository transaction has been committed. At this
1081 point, the transaction can no longer be rolled back. The hook will run
1081 point, the transaction can no longer be rolled back. The hook will run
1082 after the lock is released. See :hg:`help config.hooks.pretxnclose` for
1082 after the lock is released. See :hg:`help config.hooks.pretxnclose` for
1083 details about available variables.
1083 details about available variables.
1084
1084
1085 ``txnclose-bookmark``
1085 ``txnclose-bookmark``
1086 Run after any bookmark change has been committed. At this point, the
1086 Run after any bookmark change has been committed. At this point, the
1087 transaction can no longer be rolled back. The hook will run after the lock
1087 transaction can no longer be rolled back. The hook will run after the lock
1088 is released. See :hg:`help config.hooks.pretxnclose-bookmark` for details
1088 is released. See :hg:`help config.hooks.pretxnclose-bookmark` for details
1089 about available variables.
1089 about available variables.
1090
1090
1091 ``txnclose-phase``
1091 ``txnclose-phase``
1092 Run after any phase change has been committed. At this point, the
1092 Run after any phase change has been committed. At this point, the
1093 transaction can no longer be rolled back. The hook will run after the lock
1093 transaction can no longer be rolled back. The hook will run after the lock
1094 is released. See :hg:`help config.hooks.pretxnclose-phase` for details about
1094 is released. See :hg:`help config.hooks.pretxnclose-phase` for details about
1095 available variables.
1095 available variables.
1096
1096
1097 ``txnabort``
1097 ``txnabort``
1098 Run when a transaction is aborted. See :hg:`help config.hooks.pretxnclose`
1098 Run when a transaction is aborted. See :hg:`help config.hooks.pretxnclose`
1099 for details about available variables.
1099 for details about available variables.
1100
1100
1101 ``pretxnchangegroup``
1101 ``pretxnchangegroup``
1102 Run after a changegroup has been added via push, pull or unbundle, but before
1102 Run after a changegroup has been added via push, pull or unbundle, but before
1103 the transaction has been committed. The changegroup is visible to the hook
1103 the transaction has been committed. The changegroup is visible to the hook
1104 program. This allows validation of incoming changes before accepting them.
1104 program. This allows validation of incoming changes before accepting them.
1105 The ID of the first new changeset is in ``$HG_NODE`` and last is in
1105 The ID of the first new changeset is in ``$HG_NODE`` and last is in
1106 ``$HG_NODE_LAST``. Exit status 0 allows the transaction to commit. A non-zero
1106 ``$HG_NODE_LAST``. Exit status 0 allows the transaction to commit. A non-zero
1107 status will cause the transaction to be rolled back, and the push, pull or
1107 status will cause the transaction to be rolled back, and the push, pull or
1108 unbundle will fail. The URL that was the source of changes is in ``$HG_URL``.
1108 unbundle will fail. The URL that was the source of changes is in ``$HG_URL``.
1109
1109
1110 ``pretxncommit``
1110 ``pretxncommit``
1111 Run after a changeset has been created, but before the transaction is
1111 Run after a changeset has been created, but before the transaction is
1112 committed. The changeset is visible to the hook program. This allows
1112 committed. The changeset is visible to the hook program. This allows
1113 validation of the commit message and changes. Exit status 0 allows the
1113 validation of the commit message and changes. Exit status 0 allows the
1114 commit to proceed. A non-zero status will cause the transaction to
1114 commit to proceed. A non-zero status will cause the transaction to
1115 be rolled back. The ID of the new changeset is in ``$HG_NODE``. The parent
1115 be rolled back. The ID of the new changeset is in ``$HG_NODE``. The parent
1116 changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1116 changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1117
1117
1118 ``preupdate``
1118 ``preupdate``
1119 Run before updating the working directory. Exit status 0 allows
1119 Run before updating the working directory. Exit status 0 allows
1120 the update to proceed. A non-zero status will prevent the update.
1120 the update to proceed. A non-zero status will prevent the update.
1121 The changeset ID of the first new parent is in ``$HG_PARENT1``. If updating to a
1121 The changeset ID of the first new parent is in ``$HG_PARENT1``. If updating to a
1122 merge, the ID of the second new parent is in ``$HG_PARENT2``.
1122 merge, the ID of the second new parent is in ``$HG_PARENT2``.
1123
1123
1124 ``listkeys``
1124 ``listkeys``
1125 Run after listing pushkeys (like bookmarks) in the repository. The
1125 Run after listing pushkeys (like bookmarks) in the repository. The
1126 key namespace is in ``$HG_NAMESPACE``. ``$HG_VALUES`` is a
1126 key namespace is in ``$HG_NAMESPACE``. ``$HG_VALUES`` is a
1127 dictionary containing the keys and values.
1127 dictionary containing the keys and values.
1128
1128
1129 ``pushkey``
1129 ``pushkey``
1130 Run after a pushkey (like a bookmark) is added to the
1130 Run after a pushkey (like a bookmark) is added to the
1131 repository. The key namespace is in ``$HG_NAMESPACE``, the key is in
1131 repository. The key namespace is in ``$HG_NAMESPACE``, the key is in
1132 ``$HG_KEY``, the old value (if any) is in ``$HG_OLD``, and the new
1132 ``$HG_KEY``, the old value (if any) is in ``$HG_OLD``, and the new
1133 value is in ``$HG_NEW``.
1133 value is in ``$HG_NEW``.
1134
1134
1135 ``tag``
1135 ``tag``
1136 Run after a tag is created. The ID of the tagged changeset is in ``$HG_NODE``.
1136 Run after a tag is created. The ID of the tagged changeset is in ``$HG_NODE``.
1137 The name of tag is in ``$HG_TAG``. The tag is local if ``$HG_LOCAL=1``, or in
1137 The name of tag is in ``$HG_TAG``. The tag is local if ``$HG_LOCAL=1``, or in
1138 the repository if ``$HG_LOCAL=0``.
1138 the repository if ``$HG_LOCAL=0``.
1139
1139
1140 ``update``
1140 ``update``
1141 Run after updating the working directory. The changeset ID of the first
1141 Run after updating the working directory. The changeset ID of the first
1142 new parent is in ``$HG_PARENT1``. If updating to a merge, the ID of the second new
1142 new parent is in ``$HG_PARENT1``. If updating to a merge, the ID of the second new
1143 parent is in ``$HG_PARENT2``. If the update succeeded, ``$HG_ERROR=0``. If the
1143 parent is in ``$HG_PARENT2``. If the update succeeded, ``$HG_ERROR=0``. If the
1144 update failed (e.g. because conflicts were not resolved), ``$HG_ERROR=1``.
1144 update failed (e.g. because conflicts were not resolved), ``$HG_ERROR=1``.
1145
1145
1146 .. note::
1146 .. note::
1147
1147
1148 It is generally better to use standard hooks rather than the
1148 It is generally better to use standard hooks rather than the
1149 generic pre- and post- command hooks, as they are guaranteed to be
1149 generic pre- and post- command hooks, as they are guaranteed to be
1150 called in the appropriate contexts for influencing transactions.
1150 called in the appropriate contexts for influencing transactions.
1151 Also, hooks like "commit" will be called in all contexts that
1151 Also, hooks like "commit" will be called in all contexts that
1152 generate a commit (e.g. tag) and not just the commit command.
1152 generate a commit (e.g. tag) and not just the commit command.
1153
1153
1154 .. note::
1154 .. note::
1155
1155
1156 Environment variables with empty values may not be passed to
1156 Environment variables with empty values may not be passed to
1157 hooks on platforms such as Windows. As an example, ``$HG_PARENT2``
1157 hooks on platforms such as Windows. As an example, ``$HG_PARENT2``
1158 will have an empty value under Unix-like platforms for non-merge
1158 will have an empty value under Unix-like platforms for non-merge
1159 changesets, while it will not be available at all under Windows.
1159 changesets, while it will not be available at all under Windows.
1160
1160
1161 The syntax for Python hooks is as follows::
1161 The syntax for Python hooks is as follows::
1162
1162
1163 hookname = python:modulename.submodule.callable
1163 hookname = python:modulename.submodule.callable
1164 hookname = python:/path/to/python/module.py:callable
1164 hookname = python:/path/to/python/module.py:callable
1165
1165
1166 Python hooks are run within the Mercurial process. Each hook is
1166 Python hooks are run within the Mercurial process. Each hook is
1167 called with at least three keyword arguments: a ui object (keyword
1167 called with at least three keyword arguments: a ui object (keyword
1168 ``ui``), a repository object (keyword ``repo``), and a ``hooktype``
1168 ``ui``), a repository object (keyword ``repo``), and a ``hooktype``
1169 keyword that tells what kind of hook is used. Arguments listed as
1169 keyword that tells what kind of hook is used. Arguments listed as
1170 environment variables above are passed as keyword arguments, with no
1170 environment variables above are passed as keyword arguments, with no
1171 ``HG_`` prefix, and names in lower case.
1171 ``HG_`` prefix, and names in lower case.
1172
1172
1173 If a Python hook returns a "true" value or raises an exception, this
1173 If a Python hook returns a "true" value or raises an exception, this
1174 is treated as a failure.
1174 is treated as a failure.
1175
1175
1176
1176
1177 ``hostfingerprints``
1177 ``hostfingerprints``
1178 --------------------
1178 --------------------
1179
1179
1180 (Deprecated. Use ``[hostsecurity]``'s ``fingerprints`` options instead.)
1180 (Deprecated. Use ``[hostsecurity]``'s ``fingerprints`` options instead.)
1181
1181
1182 Fingerprints of the certificates of known HTTPS servers.
1182 Fingerprints of the certificates of known HTTPS servers.
1183
1183
1184 An HTTPS connection to a server with a fingerprint configured here will
1184 An HTTPS connection to a server with a fingerprint configured here will
1185 only succeed if the server's certificate matches the fingerprint.
1185 only succeed if the server's certificate matches the fingerprint.
1186 This is very similar to how ssh known hosts works.
1186 This is very similar to how ssh known hosts works.
1187
1187
1188 The fingerprint is the SHA-1 hash value of the DER encoded certificate.
1188 The fingerprint is the SHA-1 hash value of the DER encoded certificate.
1189 Multiple values can be specified (separated by spaces or commas). This can
1189 Multiple values can be specified (separated by spaces or commas). This can
1190 be used to define both old and new fingerprints while a host transitions
1190 be used to define both old and new fingerprints while a host transitions
1191 to a new certificate.
1191 to a new certificate.
1192
1192
1193 The CA chain and web.cacerts are not used for servers with a fingerprint.
1193 The CA chain and web.cacerts are not used for servers with a fingerprint.
1194
1194
1195 For example::
1195 For example::
1196
1196
1197 [hostfingerprints]
1197 [hostfingerprints]
1198 hg.intevation.de = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1198 hg.intevation.de = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1199 hg.intevation.org = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1199 hg.intevation.org = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1200
1200
1201 ``hostsecurity``
1201 ``hostsecurity``
1202 ----------------
1202 ----------------
1203
1203
1204 Used to specify global and per-host security settings for connecting to
1204 Used to specify global and per-host security settings for connecting to
1205 other machines.
1205 other machines.
1206
1206
1207 The following options control default behavior for all hosts.
1207 The following options control default behavior for all hosts.
1208
1208
1209 ``ciphers``
1209 ``ciphers``
1210 Defines the cryptographic ciphers to use for connections.
1210 Defines the cryptographic ciphers to use for connections.
1211
1211
1212 Value must be a valid OpenSSL Cipher List Format as documented at
1212 Value must be a valid OpenSSL Cipher List Format as documented at
1213 https://www.openssl.org/docs/manmaster/apps/ciphers.html#CIPHER-LIST-FORMAT.
1213 https://www.openssl.org/docs/manmaster/apps/ciphers.html#CIPHER-LIST-FORMAT.
1214
1214
1215 This setting is for advanced users only. Setting to incorrect values
1215 This setting is for advanced users only. Setting to incorrect values
1216 can significantly lower connection security or decrease performance.
1216 can significantly lower connection security or decrease performance.
1217 You have been warned.
1217 You have been warned.
1218
1218
1219 This option requires Python 2.7.
1219 This option requires Python 2.7.
1220
1220
1221 ``minimumprotocol``
1221 ``minimumprotocol``
1222 Defines the minimum channel encryption protocol to use.
1222 Defines the minimum channel encryption protocol to use.
1223
1223
1224 By default, the highest version of TLS supported by both client and server
1224 By default, the highest version of TLS supported by both client and server
1225 is used.
1225 is used.
1226
1226
1227 Allowed values are: ``tls1.0``, ``tls1.1``, ``tls1.2``.
1227 Allowed values are: ``tls1.0``, ``tls1.1``, ``tls1.2``.
1228
1228
1229 When running on an old Python version, only ``tls1.0`` is allowed since
1229 When running on an old Python version, only ``tls1.0`` is allowed since
1230 old versions of Python only support up to TLS 1.0.
1230 old versions of Python only support up to TLS 1.0.
1231
1231
1232 When running a Python that supports modern TLS versions, the default is
1232 When running a Python that supports modern TLS versions, the default is
1233 ``tls1.1``. ``tls1.0`` can still be used to allow TLS 1.0. However, this
1233 ``tls1.1``. ``tls1.0`` can still be used to allow TLS 1.0. However, this
1234 weakens security and should only be used as a feature of last resort if
1234 weakens security and should only be used as a feature of last resort if
1235 a server does not support TLS 1.1+.
1235 a server does not support TLS 1.1+.
1236
1236
1237 Options in the ``[hostsecurity]`` section can have the form
1237 Options in the ``[hostsecurity]`` section can have the form
1238 ``hostname``:``setting``. This allows multiple settings to be defined on a
1238 ``hostname``:``setting``. This allows multiple settings to be defined on a
1239 per-host basis.
1239 per-host basis.
1240
1240
1241 The following per-host settings can be defined.
1241 The following per-host settings can be defined.
1242
1242
1243 ``ciphers``
1243 ``ciphers``
1244 This behaves like ``ciphers`` as described above except it only applies
1244 This behaves like ``ciphers`` as described above except it only applies
1245 to the host on which it is defined.
1245 to the host on which it is defined.
1246
1246
1247 ``fingerprints``
1247 ``fingerprints``
1248 A list of hashes of the DER encoded peer/remote certificate. Values have
1248 A list of hashes of the DER encoded peer/remote certificate. Values have
1249 the form ``algorithm``:``fingerprint``. e.g.
1249 the form ``algorithm``:``fingerprint``. e.g.
1250 ``sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2``.
1250 ``sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2``.
1251 In addition, colons (``:``) can appear in the fingerprint part.
1251 In addition, colons (``:``) can appear in the fingerprint part.
1252
1252
1253 The following algorithms/prefixes are supported: ``sha1``, ``sha256``,
1253 The following algorithms/prefixes are supported: ``sha1``, ``sha256``,
1254 ``sha512``.
1254 ``sha512``.
1255
1255
1256 Use of ``sha256`` or ``sha512`` is preferred.
1256 Use of ``sha256`` or ``sha512`` is preferred.
1257
1257
1258 If a fingerprint is specified, the CA chain is not validated for this
1258 If a fingerprint is specified, the CA chain is not validated for this
1259 host and Mercurial will require the remote certificate to match one
1259 host and Mercurial will require the remote certificate to match one
1260 of the fingerprints specified. This means if the server updates its
1260 of the fingerprints specified. This means if the server updates its
1261 certificate, Mercurial will abort until a new fingerprint is defined.
1261 certificate, Mercurial will abort until a new fingerprint is defined.
1262 This can provide stronger security than traditional CA-based validation
1262 This can provide stronger security than traditional CA-based validation
1263 at the expense of convenience.
1263 at the expense of convenience.
1264
1264
1265 This option takes precedence over ``verifycertsfile``.
1265 This option takes precedence over ``verifycertsfile``.
1266
1266
1267 ``minimumprotocol``
1267 ``minimumprotocol``
1268 This behaves like ``minimumprotocol`` as described above except it
1268 This behaves like ``minimumprotocol`` as described above except it
1269 only applies to the host on which it is defined.
1269 only applies to the host on which it is defined.
1270
1270
1271 ``verifycertsfile``
1271 ``verifycertsfile``
1272 Path to a file containing a list of PEM encoded certificates used to
1272 Path to a file containing a list of PEM encoded certificates used to
1273 verify the server certificate. Environment variables and ``~user``
1273 verify the server certificate. Environment variables and ``~user``
1274 constructs are expanded in the filename.
1274 constructs are expanded in the filename.
1275
1275
1276 The server certificate or the certificate's certificate authority (CA)
1276 The server certificate or the certificate's certificate authority (CA)
1277 must match a certificate from this file or certificate verification
1277 must match a certificate from this file or certificate verification
1278 will fail and connections to the server will be refused.
1278 will fail and connections to the server will be refused.
1279
1279
1280 If defined, only certificates provided by this file will be used:
1280 If defined, only certificates provided by this file will be used:
1281 ``web.cacerts`` and any system/default certificates will not be
1281 ``web.cacerts`` and any system/default certificates will not be
1282 used.
1282 used.
1283
1283
1284 This option has no effect if the per-host ``fingerprints`` option
1284 This option has no effect if the per-host ``fingerprints`` option
1285 is set.
1285 is set.
1286
1286
1287 The format of the file is as follows::
1287 The format of the file is as follows::
1288
1288
1289 -----BEGIN CERTIFICATE-----
1289 -----BEGIN CERTIFICATE-----
1290 ... (certificate in base64 PEM encoding) ...
1290 ... (certificate in base64 PEM encoding) ...
1291 -----END CERTIFICATE-----
1291 -----END CERTIFICATE-----
1292 -----BEGIN CERTIFICATE-----
1292 -----BEGIN CERTIFICATE-----
1293 ... (certificate in base64 PEM encoding) ...
1293 ... (certificate in base64 PEM encoding) ...
1294 -----END CERTIFICATE-----
1294 -----END CERTIFICATE-----
1295
1295
1296 For example::
1296 For example::
1297
1297
1298 [hostsecurity]
1298 [hostsecurity]
1299 hg.example.com:fingerprints = sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2
1299 hg.example.com:fingerprints = sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2
1300 hg2.example.com:fingerprints = sha1:914f1aff87249c09b6859b88b1906d30756491ca, sha1:fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1300 hg2.example.com:fingerprints = sha1:914f1aff87249c09b6859b88b1906d30756491ca, sha1:fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1301 hg3.example.com:fingerprints = sha256:9a:b0:dc:e2:75:ad:8a:b7:84:58:e5:1f:07:32:f1:87:e6:bd:24:22:af:b7:ce:8e:9c:b4:10:cf:b9:f4:0e:d2
1301 hg3.example.com:fingerprints = sha256:9a:b0:dc:e2:75:ad:8a:b7:84:58:e5:1f:07:32:f1:87:e6:bd:24:22:af:b7:ce:8e:9c:b4:10:cf:b9:f4:0e:d2
1302 foo.example.com:verifycertsfile = /etc/ssl/trusted-ca-certs.pem
1302 foo.example.com:verifycertsfile = /etc/ssl/trusted-ca-certs.pem
1303
1303
1304 To change the default minimum protocol version to TLS 1.2 but to allow TLS 1.1
1304 To change the default minimum protocol version to TLS 1.2 but to allow TLS 1.1
1305 when connecting to ``hg.example.com``::
1305 when connecting to ``hg.example.com``::
1306
1306
1307 [hostsecurity]
1307 [hostsecurity]
1308 minimumprotocol = tls1.2
1308 minimumprotocol = tls1.2
1309 hg.example.com:minimumprotocol = tls1.1
1309 hg.example.com:minimumprotocol = tls1.1
1310
1310
1311 ``http_proxy``
1311 ``http_proxy``
1312 --------------
1312 --------------
1313
1313
1314 Used to access web-based Mercurial repositories through a HTTP
1314 Used to access web-based Mercurial repositories through a HTTP
1315 proxy.
1315 proxy.
1316
1316
1317 ``host``
1317 ``host``
1318 Host name and (optional) port of the proxy server, for example
1318 Host name and (optional) port of the proxy server, for example
1319 "myproxy:8000".
1319 "myproxy:8000".
1320
1320
1321 ``no``
1321 ``no``
1322 Optional. Comma-separated list of host names that should bypass
1322 Optional. Comma-separated list of host names that should bypass
1323 the proxy.
1323 the proxy.
1324
1324
1325 ``passwd``
1325 ``passwd``
1326 Optional. Password to authenticate with at the proxy server.
1326 Optional. Password to authenticate with at the proxy server.
1327
1327
1328 ``user``
1328 ``user``
1329 Optional. User name to authenticate with at the proxy server.
1329 Optional. User name to authenticate with at the proxy server.
1330
1330
1331 ``always``
1331 ``always``
1332 Optional. Always use the proxy, even for localhost and any entries
1332 Optional. Always use the proxy, even for localhost and any entries
1333 in ``http_proxy.no``. (default: False)
1333 in ``http_proxy.no``. (default: False)
1334
1334
1335 ``http``
1335 ``http``
1336 ----------
1336 ----------
1337
1337
1338 Used to configure access to Mercurial repositories via HTTP.
1338 Used to configure access to Mercurial repositories via HTTP.
1339
1339
1340 ``timeout``
1340 ``timeout``
1341 If set, blocking operations will timeout after that many seconds.
1341 If set, blocking operations will timeout after that many seconds.
1342 (default: None)
1342 (default: None)
1343
1343
1344 ``merge``
1344 ``merge``
1345 ---------
1345 ---------
1346
1346
1347 This section specifies behavior during merges and updates.
1347 This section specifies behavior during merges and updates.
1348
1348
1349 ``checkignored``
1349 ``checkignored``
1350 Controls behavior when an ignored file on disk has the same name as a tracked
1350 Controls behavior when an ignored file on disk has the same name as a tracked
1351 file in the changeset being merged or updated to, and has different
1351 file in the changeset being merged or updated to, and has different
1352 contents. Options are ``abort``, ``warn`` and ``ignore``. With ``abort``,
1352 contents. Options are ``abort``, ``warn`` and ``ignore``. With ``abort``,
1353 abort on such files. With ``warn``, warn on such files and back them up as
1353 abort on such files. With ``warn``, warn on such files and back them up as
1354 ``.orig``. With ``ignore``, don't print a warning and back them up as
1354 ``.orig``. With ``ignore``, don't print a warning and back them up as
1355 ``.orig``. (default: ``abort``)
1355 ``.orig``. (default: ``abort``)
1356
1356
1357 ``checkunknown``
1357 ``checkunknown``
1358 Controls behavior when an unknown file that isn't ignored has the same name
1358 Controls behavior when an unknown file that isn't ignored has the same name
1359 as a tracked file in the changeset being merged or updated to, and has
1359 as a tracked file in the changeset being merged or updated to, and has
1360 different contents. Similar to ``merge.checkignored``, except for files that
1360 different contents. Similar to ``merge.checkignored``, except for files that
1361 are not ignored. (default: ``abort``)
1361 are not ignored. (default: ``abort``)
1362
1362
1363 ``on-failure``
1363 ``on-failure``
1364 When set to ``continue`` (the default), the merge process attempts to
1364 When set to ``continue`` (the default), the merge process attempts to
1365 merge all unresolved files using the merge chosen tool, regardless of
1365 merge all unresolved files using the merge chosen tool, regardless of
1366 whether previous file merge attempts during the process succeeded or not.
1366 whether previous file merge attempts during the process succeeded or not.
1367 Setting this to ``prompt`` will prompt after any merge failure whether to
1367 Setting this to ``prompt`` will prompt after any merge failure whether to
1368 continue or halt the merge process. Setting this to ``halt`` will automatically
1368 continue or halt the merge process. Setting this to ``halt`` will automatically
1369 halt the merge process on any merge tool failure. The merge process
1369 halt the merge process on any merge tool failure. The merge process
1370 can be restarted by using the ``resolve`` command. When a merge is
1370 can be restarted by using the ``resolve`` command. When a merge is
1371 halted, the repository is left in a normal ``unresolved`` merge state.
1371 halted, the repository is left in a normal ``unresolved`` merge state.
1372 (default: ``continue``)
1372 (default: ``continue``)
1373
1373
1374 ``strict-capability-check``
1374 ``strict-capability-check``
1375 Whether capabilities of internal merge tools are checked strictly
1375 Whether capabilities of internal merge tools are checked strictly
1376 or not, while examining rules to decide merge tool to be used.
1376 or not, while examining rules to decide merge tool to be used.
1377 (default: False)
1377 (default: False)
1378
1378
1379 ``merge-patterns``
1379 ``merge-patterns``
1380 ------------------
1380 ------------------
1381
1381
1382 This section specifies merge tools to associate with particular file
1382 This section specifies merge tools to associate with particular file
1383 patterns. Tools matched here will take precedence over the default
1383 patterns. Tools matched here will take precedence over the default
1384 merge tool. Patterns are globs by default, rooted at the repository
1384 merge tool. Patterns are globs by default, rooted at the repository
1385 root.
1385 root.
1386
1386
1387 Example::
1387 Example::
1388
1388
1389 [merge-patterns]
1389 [merge-patterns]
1390 **.c = kdiff3
1390 **.c = kdiff3
1391 **.jpg = myimgmerge
1391 **.jpg = myimgmerge
1392
1392
1393 ``merge-tools``
1393 ``merge-tools``
1394 ---------------
1394 ---------------
1395
1395
1396 This section configures external merge tools to use for file-level
1396 This section configures external merge tools to use for file-level
1397 merges. This section has likely been preconfigured at install time.
1397 merges. This section has likely been preconfigured at install time.
1398 Use :hg:`config merge-tools` to check the existing configuration.
1398 Use :hg:`config merge-tools` to check the existing configuration.
1399 Also see :hg:`help merge-tools` for more details.
1399 Also see :hg:`help merge-tools` for more details.
1400
1400
1401 Example ``~/.hgrc``::
1401 Example ``~/.hgrc``::
1402
1402
1403 [merge-tools]
1403 [merge-tools]
1404 # Override stock tool location
1404 # Override stock tool location
1405 kdiff3.executable = ~/bin/kdiff3
1405 kdiff3.executable = ~/bin/kdiff3
1406 # Specify command line
1406 # Specify command line
1407 kdiff3.args = $base $local $other -o $output
1407 kdiff3.args = $base $local $other -o $output
1408 # Give higher priority
1408 # Give higher priority
1409 kdiff3.priority = 1
1409 kdiff3.priority = 1
1410
1410
1411 # Changing the priority of preconfigured tool
1411 # Changing the priority of preconfigured tool
1412 meld.priority = 0
1412 meld.priority = 0
1413
1413
1414 # Disable a preconfigured tool
1414 # Disable a preconfigured tool
1415 vimdiff.disabled = yes
1415 vimdiff.disabled = yes
1416
1416
1417 # Define new tool
1417 # Define new tool
1418 myHtmlTool.args = -m $local $other $base $output
1418 myHtmlTool.args = -m $local $other $base $output
1419 myHtmlTool.regkey = Software\FooSoftware\HtmlMerge
1419 myHtmlTool.regkey = Software\FooSoftware\HtmlMerge
1420 myHtmlTool.priority = 1
1420 myHtmlTool.priority = 1
1421
1421
1422 Supported arguments:
1422 Supported arguments:
1423
1423
1424 ``priority``
1424 ``priority``
1425 The priority in which to evaluate this tool.
1425 The priority in which to evaluate this tool.
1426 (default: 0)
1426 (default: 0)
1427
1427
1428 ``executable``
1428 ``executable``
1429 Either just the name of the executable or its pathname.
1429 Either just the name of the executable or its pathname.
1430
1430
1431 .. container:: windows
1431 .. container:: windows
1432
1432
1433 On Windows, the path can use environment variables with ${ProgramFiles}
1433 On Windows, the path can use environment variables with ${ProgramFiles}
1434 syntax.
1434 syntax.
1435
1435
1436 (default: the tool name)
1436 (default: the tool name)
1437
1437
1438 ``args``
1438 ``args``
1439 The arguments to pass to the tool executable. You can refer to the
1439 The arguments to pass to the tool executable. You can refer to the
1440 files being merged as well as the output file through these
1440 files being merged as well as the output file through these
1441 variables: ``$base``, ``$local``, ``$other``, ``$output``.
1441 variables: ``$base``, ``$local``, ``$other``, ``$output``.
1442
1442
1443 The meaning of ``$local`` and ``$other`` can vary depending on which action is
1443 The meaning of ``$local`` and ``$other`` can vary depending on which action is
1444 being performed. During an update or merge, ``$local`` represents the original
1444 being performed. During an update or merge, ``$local`` represents the original
1445 state of the file, while ``$other`` represents the commit you are updating to or
1445 state of the file, while ``$other`` represents the commit you are updating to or
1446 the commit you are merging with. During a rebase, ``$local`` represents the
1446 the commit you are merging with. During a rebase, ``$local`` represents the
1447 destination of the rebase, and ``$other`` represents the commit being rebased.
1447 destination of the rebase, and ``$other`` represents the commit being rebased.
1448
1448
1449 Some operations define custom labels to assist with identifying the revisions,
1449 Some operations define custom labels to assist with identifying the revisions,
1450 accessible via ``$labellocal``, ``$labelother``, and ``$labelbase``. If custom
1450 accessible via ``$labellocal``, ``$labelother``, and ``$labelbase``. If custom
1451 labels are not available, these will be ``local``, ``other``, and ``base``,
1451 labels are not available, these will be ``local``, ``other``, and ``base``,
1452 respectively.
1452 respectively.
1453 (default: ``$local $base $other``)
1453 (default: ``$local $base $other``)
1454
1454
1455 ``premerge``
1455 ``premerge``
1456 Attempt to run internal non-interactive 3-way merge tool before
1456 Attempt to run internal non-interactive 3-way merge tool before
1457 launching external tool. Options are ``true``, ``false``, ``keep`` or
1457 launching external tool. Options are ``true``, ``false``, ``keep`` or
1458 ``keep-merge3``. The ``keep`` option will leave markers in the file if the
1458 ``keep-merge3``. The ``keep`` option will leave markers in the file if the
1459 premerge fails. The ``keep-merge3`` will do the same but include information
1459 premerge fails. The ``keep-merge3`` will do the same but include information
1460 about the base of the merge in the marker (see internal :merge3 in
1460 about the base of the merge in the marker (see internal :merge3 in
1461 :hg:`help merge-tools`).
1461 :hg:`help merge-tools`).
1462 (default: True)
1462 (default: True)
1463
1463
1464 ``binary``
1464 ``binary``
1465 This tool can merge binary files. (default: False, unless tool
1465 This tool can merge binary files. (default: False, unless tool
1466 was selected by file pattern match)
1466 was selected by file pattern match)
1467
1467
1468 ``symlink``
1468 ``symlink``
1469 This tool can merge symlinks. (default: False)
1469 This tool can merge symlinks. (default: False)
1470
1470
1471 ``check``
1471 ``check``
1472 A list of merge success-checking options:
1472 A list of merge success-checking options:
1473
1473
1474 ``changed``
1474 ``changed``
1475 Ask whether merge was successful when the merged file shows no changes.
1475 Ask whether merge was successful when the merged file shows no changes.
1476 ``conflicts``
1476 ``conflicts``
1477 Check whether there are conflicts even though the tool reported success.
1477 Check whether there are conflicts even though the tool reported success.
1478 ``prompt``
1478 ``prompt``
1479 Always prompt for merge success, regardless of success reported by tool.
1479 Always prompt for merge success, regardless of success reported by tool.
1480
1480
1481 ``fixeol``
1481 ``fixeol``
1482 Attempt to fix up EOL changes caused by the merge tool.
1482 Attempt to fix up EOL changes caused by the merge tool.
1483 (default: False)
1483 (default: False)
1484
1484
1485 ``gui``
1485 ``gui``
1486 This tool requires a graphical interface to run. (default: False)
1486 This tool requires a graphical interface to run. (default: False)
1487
1487
1488 ``mergemarkers``
1488 ``mergemarkers``
1489 Controls whether the labels passed via ``$labellocal``, ``$labelother``, and
1489 Controls whether the labels passed via ``$labellocal``, ``$labelother``, and
1490 ``$labelbase`` are ``detailed`` (respecting ``mergemarkertemplate``) or
1490 ``$labelbase`` are ``detailed`` (respecting ``mergemarkertemplate``) or
1491 ``basic``. If ``premerge`` is ``keep`` or ``keep-merge3``, the conflict
1491 ``basic``. If ``premerge`` is ``keep`` or ``keep-merge3``, the conflict
1492 markers generated during premerge will be ``detailed`` if either this option or
1492 markers generated during premerge will be ``detailed`` if either this option or
1493 the corresponding option in the ``[ui]`` section is ``detailed``.
1493 the corresponding option in the ``[ui]`` section is ``detailed``.
1494 (default: ``basic``)
1494 (default: ``basic``)
1495
1495
1496 ``mergemarkertemplate``
1496 ``mergemarkertemplate``
1497 This setting can be used to override ``mergemarkertemplate`` from the ``[ui]``
1497 This setting can be used to override ``mergemarkertemplate`` from the ``[ui]``
1498 section on a per-tool basis; this applies to the ``$label``-prefixed variables
1498 section on a per-tool basis; this applies to the ``$label``-prefixed variables
1499 and to the conflict markers that are generated if ``premerge`` is ``keep`` or
1499 and to the conflict markers that are generated if ``premerge`` is ``keep`` or
1500 ``keep-merge3``. See the corresponding variable in ``[ui]`` for more
1500 ``keep-merge3``. See the corresponding variable in ``[ui]`` for more
1501 information.
1501 information.
1502
1502
1503 .. container:: windows
1503 .. container:: windows
1504
1504
1505 ``regkey``
1505 ``regkey``
1506 Windows registry key which describes install location of this
1506 Windows registry key which describes install location of this
1507 tool. Mercurial will search for this key first under
1507 tool. Mercurial will search for this key first under
1508 ``HKEY_CURRENT_USER`` and then under ``HKEY_LOCAL_MACHINE``.
1508 ``HKEY_CURRENT_USER`` and then under ``HKEY_LOCAL_MACHINE``.
1509 (default: None)
1509 (default: None)
1510
1510
1511 ``regkeyalt``
1511 ``regkeyalt``
1512 An alternate Windows registry key to try if the first key is not
1512 An alternate Windows registry key to try if the first key is not
1513 found. The alternate key uses the same ``regname`` and ``regappend``
1513 found. The alternate key uses the same ``regname`` and ``regappend``
1514 semantics of the primary key. The most common use for this key
1514 semantics of the primary key. The most common use for this key
1515 is to search for 32bit applications on 64bit operating systems.
1515 is to search for 32bit applications on 64bit operating systems.
1516 (default: None)
1516 (default: None)
1517
1517
1518 ``regname``
1518 ``regname``
1519 Name of value to read from specified registry key.
1519 Name of value to read from specified registry key.
1520 (default: the unnamed (default) value)
1520 (default: the unnamed (default) value)
1521
1521
1522 ``regappend``
1522 ``regappend``
1523 String to append to the value read from the registry, typically
1523 String to append to the value read from the registry, typically
1524 the executable name of the tool.
1524 the executable name of the tool.
1525 (default: None)
1525 (default: None)
1526
1526
1527 ``pager``
1527 ``pager``
1528 ---------
1528 ---------
1529
1529
1530 Setting used to control when to paginate and with what external tool. See
1530 Setting used to control when to paginate and with what external tool. See
1531 :hg:`help pager` for details.
1531 :hg:`help pager` for details.
1532
1532
1533 ``pager``
1533 ``pager``
1534 Define the external tool used as pager.
1534 Define the external tool used as pager.
1535
1535
1536 If no pager is set, Mercurial uses the environment variable $PAGER.
1536 If no pager is set, Mercurial uses the environment variable $PAGER.
1537 If neither pager.pager, nor $PAGER is set, a default pager will be
1537 If neither pager.pager, nor $PAGER is set, a default pager will be
1538 used, typically `less` on Unix and `more` on Windows. Example::
1538 used, typically `less` on Unix and `more` on Windows. Example::
1539
1539
1540 [pager]
1540 [pager]
1541 pager = less -FRX
1541 pager = less -FRX
1542
1542
1543 ``ignore``
1543 ``ignore``
1544 List of commands to disable the pager for. Example::
1544 List of commands to disable the pager for. Example::
1545
1545
1546 [pager]
1546 [pager]
1547 ignore = version, help, update
1547 ignore = version, help, update
1548
1548
1549 ``patch``
1549 ``patch``
1550 ---------
1550 ---------
1551
1551
1552 Settings used when applying patches, for instance through the 'import'
1552 Settings used when applying patches, for instance through the 'import'
1553 command or with Mercurial Queues extension.
1553 command or with Mercurial Queues extension.
1554
1554
1555 ``eol``
1555 ``eol``
1556 When set to 'strict' patch content and patched files end of lines
1556 When set to 'strict' patch content and patched files end of lines
1557 are preserved. When set to ``lf`` or ``crlf``, both files end of
1557 are preserved. When set to ``lf`` or ``crlf``, both files end of
1558 lines are ignored when patching and the result line endings are
1558 lines are ignored when patching and the result line endings are
1559 normalized to either LF (Unix) or CRLF (Windows). When set to
1559 normalized to either LF (Unix) or CRLF (Windows). When set to
1560 ``auto``, end of lines are again ignored while patching but line
1560 ``auto``, end of lines are again ignored while patching but line
1561 endings in patched files are normalized to their original setting
1561 endings in patched files are normalized to their original setting
1562 on a per-file basis. If target file does not exist or has no end
1562 on a per-file basis. If target file does not exist or has no end
1563 of line, patch line endings are preserved.
1563 of line, patch line endings are preserved.
1564 (default: strict)
1564 (default: strict)
1565
1565
1566 ``fuzz``
1566 ``fuzz``
1567 The number of lines of 'fuzz' to allow when applying patches. This
1567 The number of lines of 'fuzz' to allow when applying patches. This
1568 controls how much context the patcher is allowed to ignore when
1568 controls how much context the patcher is allowed to ignore when
1569 trying to apply a patch.
1569 trying to apply a patch.
1570 (default: 2)
1570 (default: 2)
1571
1571
1572 ``paths``
1572 ``paths``
1573 ---------
1573 ---------
1574
1574
1575 Assigns symbolic names and behavior to repositories.
1575 Assigns symbolic names and behavior to repositories.
1576
1576
1577 Options are symbolic names defining the URL or directory that is the
1577 Options are symbolic names defining the URL or directory that is the
1578 location of the repository. Example::
1578 location of the repository. Example::
1579
1579
1580 [paths]
1580 [paths]
1581 my_server = https://example.com/my_repo
1581 my_server = https://example.com/my_repo
1582 local_path = /home/me/repo
1582 local_path = /home/me/repo
1583
1583
1584 These symbolic names can be used from the command line. To pull
1584 These symbolic names can be used from the command line. To pull
1585 from ``my_server``: :hg:`pull my_server`. To push to ``local_path``:
1585 from ``my_server``: :hg:`pull my_server`. To push to ``local_path``:
1586 :hg:`push local_path`.
1586 :hg:`push local_path`.
1587
1587
1588 Options containing colons (``:``) denote sub-options that can influence
1588 Options containing colons (``:``) denote sub-options that can influence
1589 behavior for that specific path. Example::
1589 behavior for that specific path. Example::
1590
1590
1591 [paths]
1591 [paths]
1592 my_server = https://example.com/my_path
1592 my_server = https://example.com/my_path
1593 my_server:pushurl = ssh://example.com/my_path
1593 my_server:pushurl = ssh://example.com/my_path
1594
1594
1595 The following sub-options can be defined:
1595 The following sub-options can be defined:
1596
1596
1597 ``pushurl``
1597 ``pushurl``
1598 The URL to use for push operations. If not defined, the location
1598 The URL to use for push operations. If not defined, the location
1599 defined by the path's main entry is used.
1599 defined by the path's main entry is used.
1600
1600
1601 ``pushrev``
1601 ``pushrev``
1602 A revset defining which revisions to push by default.
1602 A revset defining which revisions to push by default.
1603
1603
1604 When :hg:`push` is executed without a ``-r`` argument, the revset
1604 When :hg:`push` is executed without a ``-r`` argument, the revset
1605 defined by this sub-option is evaluated to determine what to push.
1605 defined by this sub-option is evaluated to determine what to push.
1606
1606
1607 For example, a value of ``.`` will push the working directory's
1607 For example, a value of ``.`` will push the working directory's
1608 revision by default.
1608 revision by default.
1609
1609
1610 Revsets specifying bookmarks will not result in the bookmark being
1610 Revsets specifying bookmarks will not result in the bookmark being
1611 pushed.
1611 pushed.
1612
1612
1613 The following special named paths exist:
1613 The following special named paths exist:
1614
1614
1615 ``default``
1615 ``default``
1616 The URL or directory to use when no source or remote is specified.
1616 The URL or directory to use when no source or remote is specified.
1617
1617
1618 :hg:`clone` will automatically define this path to the location the
1618 :hg:`clone` will automatically define this path to the location the
1619 repository was cloned from.
1619 repository was cloned from.
1620
1620
1621 ``default-push``
1621 ``default-push``
1622 (deprecated) The URL or directory for the default :hg:`push` location.
1622 (deprecated) The URL or directory for the default :hg:`push` location.
1623 ``default:pushurl`` should be used instead.
1623 ``default:pushurl`` should be used instead.
1624
1624
1625 ``phases``
1625 ``phases``
1626 ----------
1626 ----------
1627
1627
1628 Specifies default handling of phases. See :hg:`help phases` for more
1628 Specifies default handling of phases. See :hg:`help phases` for more
1629 information about working with phases.
1629 information about working with phases.
1630
1630
1631 ``publish``
1631 ``publish``
1632 Controls draft phase behavior when working as a server. When true,
1632 Controls draft phase behavior when working as a server. When true,
1633 pushed changesets are set to public in both client and server and
1633 pushed changesets are set to public in both client and server and
1634 pulled or cloned changesets are set to public in the client.
1634 pulled or cloned changesets are set to public in the client.
1635 (default: True)
1635 (default: True)
1636
1636
1637 ``new-commit``
1637 ``new-commit``
1638 Phase of newly-created commits.
1638 Phase of newly-created commits.
1639 (default: draft)
1639 (default: draft)
1640
1640
1641 ``checksubrepos``
1641 ``checksubrepos``
1642 Check the phase of the current revision of each subrepository. Allowed
1642 Check the phase of the current revision of each subrepository. Allowed
1643 values are "ignore", "follow" and "abort". For settings other than
1643 values are "ignore", "follow" and "abort". For settings other than
1644 "ignore", the phase of the current revision of each subrepository is
1644 "ignore", the phase of the current revision of each subrepository is
1645 checked before committing the parent repository. If any of those phases is
1645 checked before committing the parent repository. If any of those phases is
1646 greater than the phase of the parent repository (e.g. if a subrepo is in a
1646 greater than the phase of the parent repository (e.g. if a subrepo is in a
1647 "secret" phase while the parent repo is in "draft" phase), the commit is
1647 "secret" phase while the parent repo is in "draft" phase), the commit is
1648 either aborted (if checksubrepos is set to "abort") or the higher phase is
1648 either aborted (if checksubrepos is set to "abort") or the higher phase is
1649 used for the parent repository commit (if set to "follow").
1649 used for the parent repository commit (if set to "follow").
1650 (default: follow)
1650 (default: follow)
1651
1651
1652
1652
1653 ``profiling``
1653 ``profiling``
1654 -------------
1654 -------------
1655
1655
1656 Specifies profiling type, format, and file output. Two profilers are
1656 Specifies profiling type, format, and file output. Two profilers are
1657 supported: an instrumenting profiler (named ``ls``), and a sampling
1657 supported: an instrumenting profiler (named ``ls``), and a sampling
1658 profiler (named ``stat``).
1658 profiler (named ``stat``).
1659
1659
1660 In this section description, 'profiling data' stands for the raw data
1660 In this section description, 'profiling data' stands for the raw data
1661 collected during profiling, while 'profiling report' stands for a
1661 collected during profiling, while 'profiling report' stands for a
1662 statistical text report generated from the profiling data.
1662 statistical text report generated from the profiling data.
1663
1663
1664 ``enabled``
1664 ``enabled``
1665 Enable the profiler.
1665 Enable the profiler.
1666 (default: false)
1666 (default: false)
1667
1667
1668 This is equivalent to passing ``--profile`` on the command line.
1668 This is equivalent to passing ``--profile`` on the command line.
1669
1669
1670 ``type``
1670 ``type``
1671 The type of profiler to use.
1671 The type of profiler to use.
1672 (default: stat)
1672 (default: stat)
1673
1673
1674 ``ls``
1674 ``ls``
1675 Use Python's built-in instrumenting profiler. This profiler
1675 Use Python's built-in instrumenting profiler. This profiler
1676 works on all platforms, but each line number it reports is the
1676 works on all platforms, but each line number it reports is the
1677 first line of a function. This restriction makes it difficult to
1677 first line of a function. This restriction makes it difficult to
1678 identify the expensive parts of a non-trivial function.
1678 identify the expensive parts of a non-trivial function.
1679 ``stat``
1679 ``stat``
1680 Use a statistical profiler, statprof. This profiler is most
1680 Use a statistical profiler, statprof. This profiler is most
1681 useful for profiling commands that run for longer than about 0.1
1681 useful for profiling commands that run for longer than about 0.1
1682 seconds.
1682 seconds.
1683
1683
1684 ``format``
1684 ``format``
1685 Profiling format. Specific to the ``ls`` instrumenting profiler.
1685 Profiling format. Specific to the ``ls`` instrumenting profiler.
1686 (default: text)
1686 (default: text)
1687
1687
1688 ``text``
1688 ``text``
1689 Generate a profiling report. When saving to a file, it should be
1689 Generate a profiling report. When saving to a file, it should be
1690 noted that only the report is saved, and the profiling data is
1690 noted that only the report is saved, and the profiling data is
1691 not kept.
1691 not kept.
1692 ``kcachegrind``
1692 ``kcachegrind``
1693 Format profiling data for kcachegrind use: when saving to a
1693 Format profiling data for kcachegrind use: when saving to a
1694 file, the generated file can directly be loaded into
1694 file, the generated file can directly be loaded into
1695 kcachegrind.
1695 kcachegrind.
1696
1696
1697 ``statformat``
1697 ``statformat``
1698 Profiling format for the ``stat`` profiler.
1698 Profiling format for the ``stat`` profiler.
1699 (default: hotpath)
1699 (default: hotpath)
1700
1700
1701 ``hotpath``
1701 ``hotpath``
1702 Show a tree-based display containing the hot path of execution (where
1702 Show a tree-based display containing the hot path of execution (where
1703 most time was spent).
1703 most time was spent).
1704 ``bymethod``
1704 ``bymethod``
1705 Show a table of methods ordered by how frequently they are active.
1705 Show a table of methods ordered by how frequently they are active.
1706 ``byline``
1706 ``byline``
1707 Show a table of lines in files ordered by how frequently they are active.
1707 Show a table of lines in files ordered by how frequently they are active.
1708 ``json``
1708 ``json``
1709 Render profiling data as JSON.
1709 Render profiling data as JSON.
1710
1710
1711 ``frequency``
1711 ``frequency``
1712 Sampling frequency. Specific to the ``stat`` sampling profiler.
1712 Sampling frequency. Specific to the ``stat`` sampling profiler.
1713 (default: 1000)
1713 (default: 1000)
1714
1714
1715 ``output``
1715 ``output``
1716 File path where profiling data or report should be saved. If the
1716 File path where profiling data or report should be saved. If the
1717 file exists, it is replaced. (default: None, data is printed on
1717 file exists, it is replaced. (default: None, data is printed on
1718 stderr)
1718 stderr)
1719
1719
1720 ``sort``
1720 ``sort``
1721 Sort field. Specific to the ``ls`` instrumenting profiler.
1721 Sort field. Specific to the ``ls`` instrumenting profiler.
1722 One of ``callcount``, ``reccallcount``, ``totaltime`` and
1722 One of ``callcount``, ``reccallcount``, ``totaltime`` and
1723 ``inlinetime``.
1723 ``inlinetime``.
1724 (default: inlinetime)
1724 (default: inlinetime)
1725
1725
1726 ``time-track``
1726 ``time-track``
1727 Control if the stat profiler track ``cpu`` or ``real`` time.
1727 Control if the stat profiler track ``cpu`` or ``real`` time.
1728 (default: ``cpu`` on Windows, otherwise ``real``)
1728 (default: ``cpu`` on Windows, otherwise ``real``)
1729
1729
1730 ``limit``
1730 ``limit``
1731 Number of lines to show. Specific to the ``ls`` instrumenting profiler.
1731 Number of lines to show. Specific to the ``ls`` instrumenting profiler.
1732 (default: 30)
1732 (default: 30)
1733
1733
1734 ``nested``
1734 ``nested``
1735 Show at most this number of lines of drill-down info after each main entry.
1735 Show at most this number of lines of drill-down info after each main entry.
1736 This can help explain the difference between Total and Inline.
1736 This can help explain the difference between Total and Inline.
1737 Specific to the ``ls`` instrumenting profiler.
1737 Specific to the ``ls`` instrumenting profiler.
1738 (default: 0)
1738 (default: 0)
1739
1739
1740 ``showmin``
1740 ``showmin``
1741 Minimum fraction of samples an entry must have for it to be displayed.
1741 Minimum fraction of samples an entry must have for it to be displayed.
1742 Can be specified as a float between ``0.0`` and ``1.0`` or can have a
1742 Can be specified as a float between ``0.0`` and ``1.0`` or can have a
1743 ``%`` afterwards to allow values up to ``100``. e.g. ``5%``.
1743 ``%`` afterwards to allow values up to ``100``. e.g. ``5%``.
1744
1744
1745 Only used by the ``stat`` profiler.
1745 Only used by the ``stat`` profiler.
1746
1746
1747 For the ``hotpath`` format, default is ``0.05``.
1747 For the ``hotpath`` format, default is ``0.05``.
1748 For the ``chrome`` format, default is ``0.005``.
1748 For the ``chrome`` format, default is ``0.005``.
1749
1749
1750 The option is unused on other formats.
1750 The option is unused on other formats.
1751
1751
1752 ``showmax``
1752 ``showmax``
1753 Maximum fraction of samples an entry can have before it is ignored in
1753 Maximum fraction of samples an entry can have before it is ignored in
1754 display. Values format is the same as ``showmin``.
1754 display. Values format is the same as ``showmin``.
1755
1755
1756 Only used by the ``stat`` profiler.
1756 Only used by the ``stat`` profiler.
1757
1757
1758 For the ``chrome`` format, default is ``0.999``.
1758 For the ``chrome`` format, default is ``0.999``.
1759
1759
1760 The option is unused on other formats.
1760 The option is unused on other formats.
1761
1761
1762 ``progress``
1762 ``progress``
1763 ------------
1763 ------------
1764
1764
1765 Mercurial commands can draw progress bars that are as informative as
1765 Mercurial commands can draw progress bars that are as informative as
1766 possible. Some progress bars only offer indeterminate information, while others
1766 possible. Some progress bars only offer indeterminate information, while others
1767 have a definite end point.
1767 have a definite end point.
1768
1768
1769 ``debug``
1769 ``debug``
1770 Whether to print debug info when updating the progress bar. (default: False)
1770 Whether to print debug info when updating the progress bar. (default: False)
1771
1771
1772 ``delay``
1772 ``delay``
1773 Number of seconds (float) before showing the progress bar. (default: 3)
1773 Number of seconds (float) before showing the progress bar. (default: 3)
1774
1774
1775 ``changedelay``
1775 ``changedelay``
1776 Minimum delay before showing a new topic. When set to less than 3 * refresh,
1776 Minimum delay before showing a new topic. When set to less than 3 * refresh,
1777 that value will be used instead. (default: 1)
1777 that value will be used instead. (default: 1)
1778
1778
1779 ``estimateinterval``
1779 ``estimateinterval``
1780 Maximum sampling interval in seconds for speed and estimated time
1780 Maximum sampling interval in seconds for speed and estimated time
1781 calculation. (default: 60)
1781 calculation. (default: 60)
1782
1782
1783 ``refresh``
1783 ``refresh``
1784 Time in seconds between refreshes of the progress bar. (default: 0.1)
1784 Time in seconds between refreshes of the progress bar. (default: 0.1)
1785
1785
1786 ``format``
1786 ``format``
1787 Format of the progress bar.
1787 Format of the progress bar.
1788
1788
1789 Valid entries for the format field are ``topic``, ``bar``, ``number``,
1789 Valid entries for the format field are ``topic``, ``bar``, ``number``,
1790 ``unit``, ``estimate``, ``speed``, and ``item``. ``item`` defaults to the
1790 ``unit``, ``estimate``, ``speed``, and ``item``. ``item`` defaults to the
1791 last 20 characters of the item, but this can be changed by adding either
1791 last 20 characters of the item, but this can be changed by adding either
1792 ``-<num>`` which would take the last num characters, or ``+<num>`` for the
1792 ``-<num>`` which would take the last num characters, or ``+<num>`` for the
1793 first num characters.
1793 first num characters.
1794
1794
1795 (default: topic bar number estimate)
1795 (default: topic bar number estimate)
1796
1796
1797 ``width``
1797 ``width``
1798 If set, the maximum width of the progress information (that is, min(width,
1798 If set, the maximum width of the progress information (that is, min(width,
1799 term width) will be used).
1799 term width) will be used).
1800
1800
1801 ``clear-complete``
1801 ``clear-complete``
1802 Clear the progress bar after it's done. (default: True)
1802 Clear the progress bar after it's done. (default: True)
1803
1803
1804 ``disable``
1804 ``disable``
1805 If true, don't show a progress bar.
1805 If true, don't show a progress bar.
1806
1806
1807 ``assume-tty``
1807 ``assume-tty``
1808 If true, ALWAYS show a progress bar, unless disable is given.
1808 If true, ALWAYS show a progress bar, unless disable is given.
1809
1809
1810 ``rebase``
1810 ``rebase``
1811 ----------
1811 ----------
1812
1812
1813 ``evolution.allowdivergence``
1813 ``evolution.allowdivergence``
1814 Default to False, when True allow creating divergence when performing
1814 Default to False, when True allow creating divergence when performing
1815 rebase of obsolete changesets.
1815 rebase of obsolete changesets.
1816
1816
1817 ``revsetalias``
1817 ``revsetalias``
1818 ---------------
1818 ---------------
1819
1819
1820 Alias definitions for revsets. See :hg:`help revsets` for details.
1820 Alias definitions for revsets. See :hg:`help revsets` for details.
1821
1821
1822 ``rewrite``
1822 ``rewrite``
1823 -----------
1823 -----------
1824
1824
1825 ``backup-bundle``
1825 ``backup-bundle``
1826 Whether to save stripped changesets to a bundle file. (default: True)
1826 Whether to save stripped changesets to a bundle file. (default: True)
1827
1827
1828 ``update-timestamp``
1828 ``update-timestamp``
1829 If true, updates the date and time of the changeset to current. It is only
1829 If true, updates the date and time of the changeset to current. It is only
1830 applicable for hg amend in current version.
1830 applicable for hg amend in current version.
1831
1831
1832 ``storage``
1832 ``storage``
1833 -----------
1833 -----------
1834
1834
1835 Control the strategy Mercurial uses internally to store history. Options in this
1835 Control the strategy Mercurial uses internally to store history. Options in this
1836 category impact performance and repository size.
1836 category impact performance and repository size.
1837
1837
1838 ``revlog.optimize-delta-parent-choice``
1838 ``revlog.optimize-delta-parent-choice``
1839 When storing a merge revision, both parents will be equally considered as
1839 When storing a merge revision, both parents will be equally considered as
1840 a possible delta base. This results in better delta selection and improved
1840 a possible delta base. This results in better delta selection and improved
1841 revlog compression. This option is enabled by default.
1841 revlog compression. This option is enabled by default.
1842
1842
1843 Turning this option off can result in large increase of repository size for
1843 Turning this option off can result in large increase of repository size for
1844 repository with many merges.
1844 repository with many merges.
1845
1845
1846 ``revlog.reuse-external-delta-parent``
1846 ``revlog.reuse-external-delta-parent``
1847 Control the order in which delta parents are considered when adding new
1847 Control the order in which delta parents are considered when adding new
1848 revisions from an external source.
1848 revisions from an external source.
1849 (typically: apply bundle from `hg pull` or `hg push`).
1849 (typically: apply bundle from `hg pull` or `hg push`).
1850
1850
1851 New revisions are usually provided as a delta against other revisions. By
1851 New revisions are usually provided as a delta against other revisions. By
1852 default, Mercurial will try to reuse this delta first, therefore using the
1852 default, Mercurial will try to reuse this delta first, therefore using the
1853 same "delta parent" as the source. Directly using deltas from the source
1853 same "delta parent" as the source. Directly using deltas from the source
1854 reduces CPU usage and usually speeds up operation. However, in some cases,
1854 reduces CPU usage and usually speeds up operation. However, in some cases,
1855 the source might have sub-optimal delta bases and forcing their reevaluation
1855 the source might have sub-optimal delta bases and forcing their reevaluation
1856 is useful. For example, pushes from an old client could have sub-optimal
1856 is useful. For example, pushes from an old client could have sub-optimal
1857 delta parents that the server wants to optimize (lack of general delta, bad
1857 delta parents that the server wants to optimize (lack of general delta, bad
1858 parent choices, lack of sparse-revlog, etc.).
1858 parent choices, lack of sparse-revlog, etc.).
1859
1859
1860 This option is enabled by default. Turning it off will ensure bad delta
1860 This option is enabled by default. Turning it off will ensure bad delta
1861 parent choices from older client do not propagate to this repository, at
1861 parent choices from older client do not propagate to this repository, at
1862 the cost of a small increase in CPU consumption.
1862 the cost of a small increase in CPU consumption.
1863
1863
1864 Note: this option only controls the order in which delta parents are
1864 Note: this option only controls the order in which delta parents are
1865 considered. Even when disabled, the existing delta from the source will be
1865 considered. Even when disabled, the existing delta from the source will be
1866 reused if the same delta parent is selected.
1866 reused if the same delta parent is selected.
1867
1867
1868 ``revlog.reuse-external-delta``
1869 Control the reuse of delta from external source.
1870 (typically: apply bundle from `hg pull` or `hg push`).
1871
1872 New revisions are usually provided as a delta against another revision. By
1873 default, Mercurial will not recompute the same delta again, trusting
1874 externally provided deltas. There have been rare cases of small adjustments
1875 to the diffing algorithm in the past. So in some rare cases, recomputing
1876 deltas provided by ancient clients can provide better results. Disabling
1877 this option means going through a full delta recomputation for all incoming
1878 revisions. It means a large increase in CPU usage and will slow operations
1879 down.
1880
1881 This option is enabled by default. When disabled, it also disables the
1882 related ``storage.revlog.reuse-external-delta-parent`` option.
1883
1868 ``server``
1884 ``server``
1869 ----------
1885 ----------
1870
1886
1871 Controls generic server settings.
1887 Controls generic server settings.
1872
1888
1873 ``bookmarks-pushkey-compat``
1889 ``bookmarks-pushkey-compat``
1874 Trigger the pushkey hook when bookmark updates are pushed. This config exists
1890 Trigger the pushkey hook when bookmark updates are pushed. This config exists
1875 for compatibility purposes (default: True)
1891 for compatibility purposes (default: True)
1876
1892
1877 If you use ``pushkey`` and ``pre-pushkey`` hooks to control bookmark
1893 If you use ``pushkey`` and ``pre-pushkey`` hooks to control bookmark
1878 movement we recommend you migrate them to ``txnclose-bookmark`` and
1894 movement we recommend you migrate them to ``txnclose-bookmark`` and
1879 ``pretxnclose-bookmark``.
1895 ``pretxnclose-bookmark``.
1880
1896
1881 ``compressionengines``
1897 ``compressionengines``
1882 List of compression engines and their relative priority to advertise
1898 List of compression engines and their relative priority to advertise
1883 to clients.
1899 to clients.
1884
1900
1885 The order of compression engines determines their priority, the first
1901 The order of compression engines determines their priority, the first
1886 having the highest priority. If a compression engine is not listed
1902 having the highest priority. If a compression engine is not listed
1887 here, it won't be advertised to clients.
1903 here, it won't be advertised to clients.
1888
1904
1889 If not set (the default), built-in defaults are used. Run
1905 If not set (the default), built-in defaults are used. Run
1890 :hg:`debuginstall` to list available compression engines and their
1906 :hg:`debuginstall` to list available compression engines and their
1891 default wire protocol priority.
1907 default wire protocol priority.
1892
1908
1893 Older Mercurial clients only support zlib compression and this setting
1909 Older Mercurial clients only support zlib compression and this setting
1894 has no effect for legacy clients.
1910 has no effect for legacy clients.
1895
1911
1896 ``uncompressed``
1912 ``uncompressed``
1897 Whether to allow clients to clone a repository using the
1913 Whether to allow clients to clone a repository using the
1898 uncompressed streaming protocol. This transfers about 40% more
1914 uncompressed streaming protocol. This transfers about 40% more
1899 data than a regular clone, but uses less memory and CPU on both
1915 data than a regular clone, but uses less memory and CPU on both
1900 server and client. Over a LAN (100 Mbps or better) or a very fast
1916 server and client. Over a LAN (100 Mbps or better) or a very fast
1901 WAN, an uncompressed streaming clone is a lot faster (~10x) than a
1917 WAN, an uncompressed streaming clone is a lot faster (~10x) than a
1902 regular clone. Over most WAN connections (anything slower than
1918 regular clone. Over most WAN connections (anything slower than
1903 about 6 Mbps), uncompressed streaming is slower, because of the
1919 about 6 Mbps), uncompressed streaming is slower, because of the
1904 extra data transfer overhead. This mode will also temporarily hold
1920 extra data transfer overhead. This mode will also temporarily hold
1905 the write lock while determining what data to transfer.
1921 the write lock while determining what data to transfer.
1906 (default: True)
1922 (default: True)
1907
1923
1908 ``uncompressedallowsecret``
1924 ``uncompressedallowsecret``
1909 Whether to allow stream clones when the repository contains secret
1925 Whether to allow stream clones when the repository contains secret
1910 changesets. (default: False)
1926 changesets. (default: False)
1911
1927
1912 ``preferuncompressed``
1928 ``preferuncompressed``
1913 When set, clients will try to use the uncompressed streaming
1929 When set, clients will try to use the uncompressed streaming
1914 protocol. (default: False)
1930 protocol. (default: False)
1915
1931
1916 ``disablefullbundle``
1932 ``disablefullbundle``
1917 When set, servers will refuse attempts to do pull-based clones.
1933 When set, servers will refuse attempts to do pull-based clones.
1918 If this option is set, ``preferuncompressed`` and/or clone bundles
1934 If this option is set, ``preferuncompressed`` and/or clone bundles
1919 are highly recommended. Partial clones will still be allowed.
1935 are highly recommended. Partial clones will still be allowed.
1920 (default: False)
1936 (default: False)
1921
1937
1922 ``streamunbundle``
1938 ``streamunbundle``
1923 When set, servers will apply data sent from the client directly,
1939 When set, servers will apply data sent from the client directly,
1924 otherwise it will be written to a temporary file first. This option
1940 otherwise it will be written to a temporary file first. This option
1925 effectively prevents concurrent pushes.
1941 effectively prevents concurrent pushes.
1926
1942
1927 ``pullbundle``
1943 ``pullbundle``
1928 When set, the server will check pullbundle.manifest for bundles
1944 When set, the server will check pullbundle.manifest for bundles
1929 covering the requested heads and common nodes. The first matching
1945 covering the requested heads and common nodes. The first matching
1930 entry will be streamed to the client.
1946 entry will be streamed to the client.
1931
1947
1932 For HTTP transport, the stream will still use zlib compression
1948 For HTTP transport, the stream will still use zlib compression
1933 for older clients.
1949 for older clients.
1934
1950
1935 ``concurrent-push-mode``
1951 ``concurrent-push-mode``
1936 Level of allowed race condition between two pushing clients.
1952 Level of allowed race condition between two pushing clients.
1937
1953
1938 - 'strict': push is aborted if another client touched the repository
1954 - 'strict': push is aborted if another client touched the repository
1939 while the push was preparing. (default)
1955 while the push was preparing. (default)
1940 - 'check-related': push is only aborted if it affects a head that was also
1956 - 'check-related': push is only aborted if it affects a head that was also
1941 affected while the push was preparing.
1957 affected while the push was preparing.
1942
1958
1943 This requires a compatible client (version 4.3 or later). Older clients will
1959 This requires a compatible client (version 4.3 or later). Older clients will
1944 use 'strict'.
1960 use 'strict'.
1945
1961
1946 ``validate``
1962 ``validate``
1947 Whether to validate the completeness of pushed changesets by
1963 Whether to validate the completeness of pushed changesets by
1948 checking that all new file revisions specified in manifests are
1964 checking that all new file revisions specified in manifests are
1949 present. (default: False)
1965 present. (default: False)
1950
1966
1951 ``maxhttpheaderlen``
1967 ``maxhttpheaderlen``
1952 Instruct HTTP clients not to send request headers longer than this
1968 Instruct HTTP clients not to send request headers longer than this
1953 many bytes. (default: 1024)
1969 many bytes. (default: 1024)
1954
1970
1955 ``bundle1``
1971 ``bundle1``
1956 Whether to allow clients to push and pull using the legacy bundle1
1972 Whether to allow clients to push and pull using the legacy bundle1
1957 exchange format. (default: True)
1973 exchange format. (default: True)
1958
1974
1959 ``bundle1gd``
1975 ``bundle1gd``
1960 Like ``bundle1`` but only used if the repository is using the
1976 Like ``bundle1`` but only used if the repository is using the
1961 *generaldelta* storage format. (default: True)
1977 *generaldelta* storage format. (default: True)
1962
1978
1963 ``bundle1.push``
1979 ``bundle1.push``
1964 Whether to allow clients to push using the legacy bundle1 exchange
1980 Whether to allow clients to push using the legacy bundle1 exchange
1965 format. (default: True)
1981 format. (default: True)
1966
1982
1967 ``bundle1gd.push``
1983 ``bundle1gd.push``
1968 Like ``bundle1.push`` but only used if the repository is using the
1984 Like ``bundle1.push`` but only used if the repository is using the
1969 *generaldelta* storage format. (default: True)
1985 *generaldelta* storage format. (default: True)
1970
1986
1971 ``bundle1.pull``
1987 ``bundle1.pull``
1972 Whether to allow clients to pull using the legacy bundle1 exchange
1988 Whether to allow clients to pull using the legacy bundle1 exchange
1973 format. (default: True)
1989 format. (default: True)
1974
1990
1975 ``bundle1gd.pull``
1991 ``bundle1gd.pull``
1976 Like ``bundle1.pull`` but only used if the repository is using the
1992 Like ``bundle1.pull`` but only used if the repository is using the
1977 *generaldelta* storage format. (default: True)
1993 *generaldelta* storage format. (default: True)
1978
1994
1979 Large repositories using the *generaldelta* storage format should
1995 Large repositories using the *generaldelta* storage format should
1980 consider setting this option because converting *generaldelta*
1996 consider setting this option because converting *generaldelta*
1981 repositories to the exchange format required by the bundle1 data
1997 repositories to the exchange format required by the bundle1 data
1982 format can consume a lot of CPU.
1998 format can consume a lot of CPU.
1983
1999
1984 ``bundle2.stream``
2000 ``bundle2.stream``
1985 Whether to allow clients to pull using the bundle2 streaming protocol.
2001 Whether to allow clients to pull using the bundle2 streaming protocol.
1986 (default: True)
2002 (default: True)
1987
2003
1988 ``zliblevel``
2004 ``zliblevel``
1989 Integer between ``-1`` and ``9`` that controls the zlib compression level
2005 Integer between ``-1`` and ``9`` that controls the zlib compression level
1990 for wire protocol commands that send zlib compressed output (notably the
2006 for wire protocol commands that send zlib compressed output (notably the
1991 commands that send repository history data).
2007 commands that send repository history data).
1992
2008
1993 The default (``-1``) uses the default zlib compression level, which is
2009 The default (``-1``) uses the default zlib compression level, which is
1994 likely equivalent to ``6``. ``0`` means no compression. ``9`` means
2010 likely equivalent to ``6``. ``0`` means no compression. ``9`` means
1995 maximum compression.
2011 maximum compression.
1996
2012
1997 Setting this option allows server operators to make trade-offs between
2013 Setting this option allows server operators to make trade-offs between
1998 bandwidth and CPU used. Lowering the compression lowers CPU utilization
2014 bandwidth and CPU used. Lowering the compression lowers CPU utilization
1999 but sends more bytes to clients.
2015 but sends more bytes to clients.
2000
2016
2001 This option only impacts the HTTP server.
2017 This option only impacts the HTTP server.
2002
2018
2003 ``zstdlevel``
2019 ``zstdlevel``
2004 Integer between ``1`` and ``22`` that controls the zstd compression level
2020 Integer between ``1`` and ``22`` that controls the zstd compression level
2005 for wire protocol commands. ``1`` is the minimal amount of compression and
2021 for wire protocol commands. ``1`` is the minimal amount of compression and
2006 ``22`` is the highest amount of compression.
2022 ``22`` is the highest amount of compression.
2007
2023
2008 The default (``3``) should be significantly faster than zlib while likely
2024 The default (``3``) should be significantly faster than zlib while likely
2009 delivering better compression ratios.
2025 delivering better compression ratios.
2010
2026
2011 This option only impacts the HTTP server.
2027 This option only impacts the HTTP server.
2012
2028
2013 See also ``server.zliblevel``.
2029 See also ``server.zliblevel``.
2014
2030
2015 ``smtp``
2031 ``smtp``
2016 --------
2032 --------
2017
2033
2018 Configuration for extensions that need to send email messages.
2034 Configuration for extensions that need to send email messages.
2019
2035
2020 ``host``
2036 ``host``
2021 Host name of mail server, e.g. "mail.example.com".
2037 Host name of mail server, e.g. "mail.example.com".
2022
2038
2023 ``port``
2039 ``port``
2024 Optional. Port to connect to on mail server. (default: 465 if
2040 Optional. Port to connect to on mail server. (default: 465 if
2025 ``tls`` is smtps; 25 otherwise)
2041 ``tls`` is smtps; 25 otherwise)
2026
2042
2027 ``tls``
2043 ``tls``
2028 Optional. Method to enable TLS when connecting to mail server: starttls,
2044 Optional. Method to enable TLS when connecting to mail server: starttls,
2029 smtps or none. (default: none)
2045 smtps or none. (default: none)
2030
2046
2031 ``username``
2047 ``username``
2032 Optional. User name for authenticating with the SMTP server.
2048 Optional. User name for authenticating with the SMTP server.
2033 (default: None)
2049 (default: None)
2034
2050
2035 ``password``
2051 ``password``
2036 Optional. Password for authenticating with the SMTP server. If not
2052 Optional. Password for authenticating with the SMTP server. If not
2037 specified, interactive sessions will prompt the user for a
2053 specified, interactive sessions will prompt the user for a
2038 password; non-interactive sessions will fail. (default: None)
2054 password; non-interactive sessions will fail. (default: None)
2039
2055
2040 ``local_hostname``
2056 ``local_hostname``
2041 Optional. The hostname that the sender can use to identify
2057 Optional. The hostname that the sender can use to identify
2042 itself to the MTA.
2058 itself to the MTA.
2043
2059
2044
2060
2045 ``subpaths``
2061 ``subpaths``
2046 ------------
2062 ------------
2047
2063
2048 Subrepository source URLs can go stale if a remote server changes name
2064 Subrepository source URLs can go stale if a remote server changes name
2049 or becomes temporarily unavailable. This section lets you define
2065 or becomes temporarily unavailable. This section lets you define
2050 rewrite rules of the form::
2066 rewrite rules of the form::
2051
2067
2052 <pattern> = <replacement>
2068 <pattern> = <replacement>
2053
2069
2054 where ``pattern`` is a regular expression matching a subrepository
2070 where ``pattern`` is a regular expression matching a subrepository
2055 source URL and ``replacement`` is the replacement string used to
2071 source URL and ``replacement`` is the replacement string used to
2056 rewrite it. Groups can be matched in ``pattern`` and referenced in
2072 rewrite it. Groups can be matched in ``pattern`` and referenced in
2057 ``replacements``. For instance::
2073 ``replacements``. For instance::
2058
2074
2059 http://server/(.*)-hg/ = http://hg.server/\1/
2075 http://server/(.*)-hg/ = http://hg.server/\1/
2060
2076
2061 rewrites ``http://server/foo-hg/`` into ``http://hg.server/foo/``.
2077 rewrites ``http://server/foo-hg/`` into ``http://hg.server/foo/``.
2062
2078
2063 Relative subrepository paths are first made absolute, and the
2079 Relative subrepository paths are first made absolute, and the
2064 rewrite rules are then applied on the full (absolute) path. If ``pattern``
2080 rewrite rules are then applied on the full (absolute) path. If ``pattern``
2065 doesn't match the full path, an attempt is made to apply it on the
2081 doesn't match the full path, an attempt is made to apply it on the
2066 relative path alone. The rules are applied in definition order.
2082 relative path alone. The rules are applied in definition order.
2067
2083
2068 ``subrepos``
2084 ``subrepos``
2069 ------------
2085 ------------
2070
2086
2071 This section contains options that control the behavior of the
2087 This section contains options that control the behavior of the
2072 subrepositories feature. See also :hg:`help subrepos`.
2088 subrepositories feature. See also :hg:`help subrepos`.
2073
2089
2074 Security note: auditing in Mercurial is known to be insufficient to
2090 Security note: auditing in Mercurial is known to be insufficient to
2075 prevent clone-time code execution with carefully constructed Git
2091 prevent clone-time code execution with carefully constructed Git
2076 subrepos. It is unknown if a similar defect is present in Subversion
2092 subrepos. It is unknown if a similar defect is present in Subversion
2077 subrepos. Both Git and Subversion subrepos are disabled by default
2093 subrepos. Both Git and Subversion subrepos are disabled by default
2078 out of security concerns. These subrepo types can be enabled using
2094 out of security concerns. These subrepo types can be enabled using
2079 the respective options below.
2095 the respective options below.
2080
2096
2081 ``allowed``
2097 ``allowed``
2082 Whether subrepositories are allowed in the working directory.
2098 Whether subrepositories are allowed in the working directory.
2083
2099
2084 When false, commands involving subrepositories (like :hg:`update`)
2100 When false, commands involving subrepositories (like :hg:`update`)
2085 will fail for all subrepository types.
2101 will fail for all subrepository types.
2086 (default: true)
2102 (default: true)
2087
2103
2088 ``hg:allowed``
2104 ``hg:allowed``
2089 Whether Mercurial subrepositories are allowed in the working
2105 Whether Mercurial subrepositories are allowed in the working
2090 directory. This option only has an effect if ``subrepos.allowed``
2106 directory. This option only has an effect if ``subrepos.allowed``
2091 is true.
2107 is true.
2092 (default: true)
2108 (default: true)
2093
2109
2094 ``git:allowed``
2110 ``git:allowed``
2095 Whether Git subrepositories are allowed in the working directory.
2111 Whether Git subrepositories are allowed in the working directory.
2096 This option only has an effect if ``subrepos.allowed`` is true.
2112 This option only has an effect if ``subrepos.allowed`` is true.
2097
2113
2098 See the security note above before enabling Git subrepos.
2114 See the security note above before enabling Git subrepos.
2099 (default: false)
2115 (default: false)
2100
2116
2101 ``svn:allowed``
2117 ``svn:allowed``
2102 Whether Subversion subrepositories are allowed in the working
2118 Whether Subversion subrepositories are allowed in the working
2103 directory. This option only has an effect if ``subrepos.allowed``
2119 directory. This option only has an effect if ``subrepos.allowed``
2104 is true.
2120 is true.
2105
2121
2106 See the security note above before enabling Subversion subrepos.
2122 See the security note above before enabling Subversion subrepos.
2107 (default: false)
2123 (default: false)
2108
2124
2109 ``templatealias``
2125 ``templatealias``
2110 -----------------
2126 -----------------
2111
2127
2112 Alias definitions for templates. See :hg:`help templates` for details.
2128 Alias definitions for templates. See :hg:`help templates` for details.
2113
2129
2114 ``templates``
2130 ``templates``
2115 -------------
2131 -------------
2116
2132
2117 Use the ``[templates]`` section to define template strings.
2133 Use the ``[templates]`` section to define template strings.
2118 See :hg:`help templates` for details.
2134 See :hg:`help templates` for details.
2119
2135
2120 ``trusted``
2136 ``trusted``
2121 -----------
2137 -----------
2122
2138
2123 Mercurial will not use the settings in the
2139 Mercurial will not use the settings in the
2124 ``.hg/hgrc`` file from a repository if it doesn't belong to a trusted
2140 ``.hg/hgrc`` file from a repository if it doesn't belong to a trusted
2125 user or to a trusted group, as various hgrc features allow arbitrary
2141 user or to a trusted group, as various hgrc features allow arbitrary
2126 commands to be run. This issue is often encountered when configuring
2142 commands to be run. This issue is often encountered when configuring
2127 hooks or extensions for shared repositories or servers. However,
2143 hooks or extensions for shared repositories or servers. However,
2128 the web interface will use some safe settings from the ``[web]``
2144 the web interface will use some safe settings from the ``[web]``
2129 section.
2145 section.
2130
2146
2131 This section specifies what users and groups are trusted. The
2147 This section specifies what users and groups are trusted. The
2132 current user is always trusted. To trust everybody, list a user or a
2148 current user is always trusted. To trust everybody, list a user or a
2133 group with name ``*``. These settings must be placed in an
2149 group with name ``*``. These settings must be placed in an
2134 *already-trusted file* to take effect, such as ``$HOME/.hgrc`` of the
2150 *already-trusted file* to take effect, such as ``$HOME/.hgrc`` of the
2135 user or service running Mercurial.
2151 user or service running Mercurial.
2136
2152
2137 ``users``
2153 ``users``
2138 Comma-separated list of trusted users.
2154 Comma-separated list of trusted users.
2139
2155
2140 ``groups``
2156 ``groups``
2141 Comma-separated list of trusted groups.
2157 Comma-separated list of trusted groups.
2142
2158
2143
2159
2144 ``ui``
2160 ``ui``
2145 ------
2161 ------
2146
2162
2147 User interface controls.
2163 User interface controls.
2148
2164
2149 ``archivemeta``
2165 ``archivemeta``
2150 Whether to include the .hg_archival.txt file containing meta data
2166 Whether to include the .hg_archival.txt file containing meta data
2151 (hashes for the repository base and for tip) in archives created
2167 (hashes for the repository base and for tip) in archives created
2152 by the :hg:`archive` command or downloaded via hgweb.
2168 by the :hg:`archive` command or downloaded via hgweb.
2153 (default: True)
2169 (default: True)
2154
2170
2155 ``askusername``
2171 ``askusername``
2156 Whether to prompt for a username when committing. If True, and
2172 Whether to prompt for a username when committing. If True, and
2157 neither ``$HGUSER`` nor ``$EMAIL`` has been specified, then the user will
2173 neither ``$HGUSER`` nor ``$EMAIL`` has been specified, then the user will
2158 be prompted to enter a username. If no username is entered, the
2174 be prompted to enter a username. If no username is entered, the
2159 default ``USER@HOST`` is used instead.
2175 default ``USER@HOST`` is used instead.
2160 (default: False)
2176 (default: False)
2161
2177
2162 ``clonebundles``
2178 ``clonebundles``
2163 Whether the "clone bundles" feature is enabled.
2179 Whether the "clone bundles" feature is enabled.
2164
2180
2165 When enabled, :hg:`clone` may download and apply a server-advertised
2181 When enabled, :hg:`clone` may download and apply a server-advertised
2166 bundle file from a URL instead of using the normal exchange mechanism.
2182 bundle file from a URL instead of using the normal exchange mechanism.
2167
2183
2168 This can likely result in faster and more reliable clones.
2184 This can likely result in faster and more reliable clones.
2169
2185
2170 (default: True)
2186 (default: True)
2171
2187
2172 ``clonebundlefallback``
2188 ``clonebundlefallback``
2173 Whether failure to apply an advertised "clone bundle" from a server
2189 Whether failure to apply an advertised "clone bundle" from a server
2174 should result in fallback to a regular clone.
2190 should result in fallback to a regular clone.
2175
2191
2176 This is disabled by default because servers advertising "clone
2192 This is disabled by default because servers advertising "clone
2177 bundles" often do so to reduce server load. If advertised bundles
2193 bundles" often do so to reduce server load. If advertised bundles
2178 start mass failing and clients automatically fall back to a regular
2194 start mass failing and clients automatically fall back to a regular
2179 clone, this would add significant and unexpected load to the server
2195 clone, this would add significant and unexpected load to the server
2180 since the server is expecting clone operations to be offloaded to
2196 since the server is expecting clone operations to be offloaded to
2181 pre-generated bundles. Failing fast (the default behavior) ensures
2197 pre-generated bundles. Failing fast (the default behavior) ensures
2182 clients don't overwhelm the server when "clone bundle" application
2198 clients don't overwhelm the server when "clone bundle" application
2183 fails.
2199 fails.
2184
2200
2185 (default: False)
2201 (default: False)
2186
2202
2187 ``clonebundleprefers``
2203 ``clonebundleprefers``
2188 Defines preferences for which "clone bundles" to use.
2204 Defines preferences for which "clone bundles" to use.
2189
2205
2190 Servers advertising "clone bundles" may advertise multiple available
2206 Servers advertising "clone bundles" may advertise multiple available
2191 bundles. Each bundle may have different attributes, such as the bundle
2207 bundles. Each bundle may have different attributes, such as the bundle
2192 type and compression format. This option is used to prefer a particular
2208 type and compression format. This option is used to prefer a particular
2193 bundle over another.
2209 bundle over another.
2194
2210
2195 The following keys are defined by Mercurial:
2211 The following keys are defined by Mercurial:
2196
2212
2197 BUNDLESPEC
2213 BUNDLESPEC
2198 A bundle type specifier. These are strings passed to :hg:`bundle -t`.
2214 A bundle type specifier. These are strings passed to :hg:`bundle -t`.
2199 e.g. ``gzip-v2`` or ``bzip2-v1``.
2215 e.g. ``gzip-v2`` or ``bzip2-v1``.
2200
2216
2201 COMPRESSION
2217 COMPRESSION
2202 The compression format of the bundle. e.g. ``gzip`` and ``bzip2``.
2218 The compression format of the bundle. e.g. ``gzip`` and ``bzip2``.
2203
2219
2204 Server operators may define custom keys.
2220 Server operators may define custom keys.
2205
2221
2206 Example values: ``COMPRESSION=bzip2``,
2222 Example values: ``COMPRESSION=bzip2``,
2207 ``BUNDLESPEC=gzip-v2, COMPRESSION=gzip``.
2223 ``BUNDLESPEC=gzip-v2, COMPRESSION=gzip``.
2208
2224
2209 By default, the first bundle advertised by the server is used.
2225 By default, the first bundle advertised by the server is used.
2210
2226
2211 ``color``
2227 ``color``
2212 When to colorize output. Possible values are Boolean ("yes" or "no"), or
2228 When to colorize output. Possible values are Boolean ("yes" or "no"), or
2213 "debug", or "always". (default: "yes"). "yes" will use color whenever it
2229 "debug", or "always". (default: "yes"). "yes" will use color whenever it
2214 seems possible. See :hg:`help color` for details.
2230 seems possible. See :hg:`help color` for details.
2215
2231
2216 ``commitsubrepos``
2232 ``commitsubrepos``
2217 Whether to commit modified subrepositories when committing the
2233 Whether to commit modified subrepositories when committing the
2218 parent repository. If False and one subrepository has uncommitted
2234 parent repository. If False and one subrepository has uncommitted
2219 changes, abort the commit.
2235 changes, abort the commit.
2220 (default: False)
2236 (default: False)
2221
2237
2222 ``debug``
2238 ``debug``
2223 Print debugging information. (default: False)
2239 Print debugging information. (default: False)
2224
2240
2225 ``editor``
2241 ``editor``
2226 The editor to use during a commit. (default: ``$EDITOR`` or ``vi``)
2242 The editor to use during a commit. (default: ``$EDITOR`` or ``vi``)
2227
2243
2228 ``fallbackencoding``
2244 ``fallbackencoding``
2229 Encoding to try if it's not possible to decode the changelog using
2245 Encoding to try if it's not possible to decode the changelog using
2230 UTF-8. (default: ISO-8859-1)
2246 UTF-8. (default: ISO-8859-1)
2231
2247
2232 ``graphnodetemplate``
2248 ``graphnodetemplate``
2233 The template used to print changeset nodes in an ASCII revision graph.
2249 The template used to print changeset nodes in an ASCII revision graph.
2234 (default: ``{graphnode}``)
2250 (default: ``{graphnode}``)
2235
2251
2236 ``ignore``
2252 ``ignore``
2237 A file to read per-user ignore patterns from. This file should be
2253 A file to read per-user ignore patterns from. This file should be
2238 in the same format as a repository-wide .hgignore file. Filenames
2254 in the same format as a repository-wide .hgignore file. Filenames
2239 are relative to the repository root. This option supports hook syntax,
2255 are relative to the repository root. This option supports hook syntax,
2240 so if you want to specify multiple ignore files, you can do so by
2256 so if you want to specify multiple ignore files, you can do so by
2241 setting something like ``ignore.other = ~/.hgignore2``. For details
2257 setting something like ``ignore.other = ~/.hgignore2``. For details
2242 of the ignore file format, see the ``hgignore(5)`` man page.
2258 of the ignore file format, see the ``hgignore(5)`` man page.
2243
2259
2244 ``interactive``
2260 ``interactive``
2245 Allow prompting the user. (default: True)
2261 Allow prompting the user. (default: True)
2246
2262
2247 ``interface``
2263 ``interface``
2248 Select the default interface for interactive features (default: text).
2264 Select the default interface for interactive features (default: text).
2249 Possible values are 'text' and 'curses'.
2265 Possible values are 'text' and 'curses'.
2250
2266
2251 ``interface.chunkselector``
2267 ``interface.chunkselector``
2252 Select the interface for change recording (e.g. :hg:`commit -i`).
2268 Select the interface for change recording (e.g. :hg:`commit -i`).
2253 Possible values are 'text' and 'curses'.
2269 Possible values are 'text' and 'curses'.
2254 This config overrides the interface specified by ui.interface.
2270 This config overrides the interface specified by ui.interface.
2255
2271
2256 ``large-file-limit``
2272 ``large-file-limit``
2257 Largest file size that gives no memory use warning.
2273 Largest file size that gives no memory use warning.
2258 Possible values are integers or 0 to disable the check.
2274 Possible values are integers or 0 to disable the check.
2259 (default: 10000000)
2275 (default: 10000000)
2260
2276
2261 ``logtemplate``
2277 ``logtemplate``
2262 Template string for commands that print changesets.
2278 Template string for commands that print changesets.
2263
2279
2264 ``merge``
2280 ``merge``
2265 The conflict resolution program to use during a manual merge.
2281 The conflict resolution program to use during a manual merge.
2266 For more information on merge tools see :hg:`help merge-tools`.
2282 For more information on merge tools see :hg:`help merge-tools`.
2267 For configuring merge tools see the ``[merge-tools]`` section.
2283 For configuring merge tools see the ``[merge-tools]`` section.
2268
2284
2269 ``mergemarkers``
2285 ``mergemarkers``
2270 Sets the merge conflict marker label styling. The ``detailed``
2286 Sets the merge conflict marker label styling. The ``detailed``
2271 style uses the ``mergemarkertemplate`` setting to style the labels.
2287 style uses the ``mergemarkertemplate`` setting to style the labels.
2272 The ``basic`` style just uses 'local' and 'other' as the marker label.
2288 The ``basic`` style just uses 'local' and 'other' as the marker label.
2273 One of ``basic`` or ``detailed``.
2289 One of ``basic`` or ``detailed``.
2274 (default: ``basic``)
2290 (default: ``basic``)
2275
2291
2276 ``mergemarkertemplate``
2292 ``mergemarkertemplate``
2277 The template used to print the commit description next to each conflict
2293 The template used to print the commit description next to each conflict
2278 marker during merge conflicts. See :hg:`help templates` for the template
2294 marker during merge conflicts. See :hg:`help templates` for the template
2279 format.
2295 format.
2280
2296
2281 Defaults to showing the hash, tags, branches, bookmarks, author, and
2297 Defaults to showing the hash, tags, branches, bookmarks, author, and
2282 the first line of the commit description.
2298 the first line of the commit description.
2283
2299
2284 If you use non-ASCII characters in names for tags, branches, bookmarks,
2300 If you use non-ASCII characters in names for tags, branches, bookmarks,
2285 authors, and/or commit descriptions, you must pay attention to encodings of
2301 authors, and/or commit descriptions, you must pay attention to encodings of
2286 managed files. At template expansion, non-ASCII characters use the encoding
2302 managed files. At template expansion, non-ASCII characters use the encoding
2287 specified by the ``--encoding`` global option, ``HGENCODING`` or other
2303 specified by the ``--encoding`` global option, ``HGENCODING`` or other
2288 environment variables that govern your locale. If the encoding of the merge
2304 environment variables that govern your locale. If the encoding of the merge
2289 markers is different from the encoding of the merged files,
2305 markers is different from the encoding of the merged files,
2290 serious problems may occur.
2306 serious problems may occur.
2291
2307
2292 Can be overridden per-merge-tool, see the ``[merge-tools]`` section.
2308 Can be overridden per-merge-tool, see the ``[merge-tools]`` section.
2293
2309
2294 ``message-output``
2310 ``message-output``
2295 Where to write status and error messages. (default: ``stdio``)
2311 Where to write status and error messages. (default: ``stdio``)
2296
2312
2297 ``stderr``
2313 ``stderr``
2298 Everything to stderr.
2314 Everything to stderr.
2299 ``stdio``
2315 ``stdio``
2300 Status to stdout, and error to stderr.
2316 Status to stdout, and error to stderr.
2301
2317
2302 ``origbackuppath``
2318 ``origbackuppath``
2303 The path to a directory used to store generated .orig files. If the path is
2319 The path to a directory used to store generated .orig files. If the path is
2304 not a directory, one will be created. If set, files stored in this
2320 not a directory, one will be created. If set, files stored in this
2305 directory have the same name as the original file and do not have a .orig
2321 directory have the same name as the original file and do not have a .orig
2306 suffix.
2322 suffix.
2307
2323
2308 ``paginate``
2324 ``paginate``
2309 Control the pagination of command output (default: True). See :hg:`help pager`
2325 Control the pagination of command output (default: True). See :hg:`help pager`
2310 for details.
2326 for details.
2311
2327
2312 ``patch``
2328 ``patch``
2313 An optional external tool that ``hg import`` and some extensions
2329 An optional external tool that ``hg import`` and some extensions
2314 will use for applying patches. By default Mercurial uses an
2330 will use for applying patches. By default Mercurial uses an
2315 internal patch utility. The external tool must work as the common
2331 internal patch utility. The external tool must work as the common
2316 Unix ``patch`` program. In particular, it must accept a ``-p``
2332 Unix ``patch`` program. In particular, it must accept a ``-p``
2317 argument to strip patch headers, a ``-d`` argument to specify the
2333 argument to strip patch headers, a ``-d`` argument to specify the
2318 current directory, a file name to patch, and a patch file to take
2334 current directory, a file name to patch, and a patch file to take
2319 from stdin.
2335 from stdin.
2320
2336
2321 It is possible to specify a patch tool together with extra
2337 It is possible to specify a patch tool together with extra
2322 arguments. For example, setting this option to ``patch --merge``
2338 arguments. For example, setting this option to ``patch --merge``
2323 will use the ``patch`` program with its 2-way merge option.
2339 will use the ``patch`` program with its 2-way merge option.
2324
2340
2325 ``portablefilenames``
2341 ``portablefilenames``
2326 Check for portable filenames. Can be ``warn``, ``ignore`` or ``abort``.
2342 Check for portable filenames. Can be ``warn``, ``ignore`` or ``abort``.
2327 (default: ``warn``)
2343 (default: ``warn``)
2328
2344
2329 ``warn``
2345 ``warn``
2330 Print a warning message on POSIX platforms, if a file with a non-portable
2346 Print a warning message on POSIX platforms, if a file with a non-portable
2331 filename is added (e.g. a file with a name that can't be created on
2347 filename is added (e.g. a file with a name that can't be created on
2332 Windows because it contains reserved parts like ``AUX``, reserved
2348 Windows because it contains reserved parts like ``AUX``, reserved
2333 characters like ``:``, or would cause a case collision with an existing
2349 characters like ``:``, or would cause a case collision with an existing
2334 file).
2350 file).
2335
2351
2336 ``ignore``
2352 ``ignore``
2337 Don't print a warning.
2353 Don't print a warning.
2338
2354
2339 ``abort``
2355 ``abort``
2340 The command is aborted.
2356 The command is aborted.
2341
2357
2342 ``true``
2358 ``true``
2343 Alias for ``warn``.
2359 Alias for ``warn``.
2344
2360
2345 ``false``
2361 ``false``
2346 Alias for ``ignore``.
2362 Alias for ``ignore``.
2347
2363
2348 .. container:: windows
2364 .. container:: windows
2349
2365
2350 On Windows, this configuration option is ignored and the command aborted.
2366 On Windows, this configuration option is ignored and the command aborted.
2351
2367
2352 ``pre-merge-tool-output-template``
2368 ``pre-merge-tool-output-template``
2353 A template that is printed before executing an external merge tool. This can
2369 A template that is printed before executing an external merge tool. This can
2354 be used to print out additional context that might be useful to have during
2370 be used to print out additional context that might be useful to have during
2355 the conflict resolution, such as the description of the various commits
2371 the conflict resolution, such as the description of the various commits
2356 involved or bookmarks/tags.
2372 involved or bookmarks/tags.
2357
2373
2358 Additional information is available in the ``local``, ``base``, and ``other``
2374 Additional information is available in the ``local``, ``base``, and ``other``
2359 dicts. For example: ``{local.label}``, ``{base.name}``, or
2375 dicts. For example: ``{local.label}``, ``{base.name}``, or
2360 ``{other.islink}``.
2376 ``{other.islink}``.
2361
2377
2362 ``quiet``
2378 ``quiet``
2363 Reduce the amount of output printed.
2379 Reduce the amount of output printed.
2364 (default: False)
2380 (default: False)
2365
2381
2366 ``relative-paths``
2382 ``relative-paths``
2367 Prefer relative paths in the UI.
2383 Prefer relative paths in the UI.
2368
2384
2369 ``remotecmd``
2385 ``remotecmd``
2370 Remote command to use for clone/push/pull operations.
2386 Remote command to use for clone/push/pull operations.
2371 (default: ``hg``)
2387 (default: ``hg``)
2372
2388
2373 ``report_untrusted``
2389 ``report_untrusted``
2374 Warn if a ``.hg/hgrc`` file is ignored due to not being owned by a
2390 Warn if a ``.hg/hgrc`` file is ignored due to not being owned by a
2375 trusted user or group.
2391 trusted user or group.
2376 (default: True)
2392 (default: True)
2377
2393
2378 ``slash``
2394 ``slash``
2379 (Deprecated. Use ``slashpath`` template filter instead.)
2395 (Deprecated. Use ``slashpath`` template filter instead.)
2380
2396
2381 Display paths using a slash (``/``) as the path separator. This
2397 Display paths using a slash (``/``) as the path separator. This
2382 only makes a difference on systems where the default path
2398 only makes a difference on systems where the default path
2383 separator is not the slash character (e.g. Windows uses the
2399 separator is not the slash character (e.g. Windows uses the
2384 backslash character (``\``)).
2400 backslash character (``\``)).
2385 (default: False)
2401 (default: False)
2386
2402
2387 ``statuscopies``
2403 ``statuscopies``
2388 Display copies in the status command.
2404 Display copies in the status command.
2389
2405
2390 ``ssh``
2406 ``ssh``
2391 Command to use for SSH connections. (default: ``ssh``)
2407 Command to use for SSH connections. (default: ``ssh``)
2392
2408
2393 ``ssherrorhint``
2409 ``ssherrorhint``
2394 A hint shown to the user in the case of SSH error (e.g.
2410 A hint shown to the user in the case of SSH error (e.g.
2395 ``Please see http://company/internalwiki/ssh.html``)
2411 ``Please see http://company/internalwiki/ssh.html``)
2396
2412
2397 ``strict``
2413 ``strict``
2398 Require exact command names, instead of allowing unambiguous
2414 Require exact command names, instead of allowing unambiguous
2399 abbreviations. (default: False)
2415 abbreviations. (default: False)
2400
2416
2401 ``style``
2417 ``style``
2402 Name of style to use for command output.
2418 Name of style to use for command output.
2403
2419
2404 ``supportcontact``
2420 ``supportcontact``
2405 A URL where users should report a Mercurial traceback. Use this if you are a
2421 A URL where users should report a Mercurial traceback. Use this if you are a
2406 large organisation with its own Mercurial deployment process and crash
2422 large organisation with its own Mercurial deployment process and crash
2407 reports should be addressed to your internal support.
2423 reports should be addressed to your internal support.
2408
2424
2409 ``textwidth``
2425 ``textwidth``
2410 Maximum width of help text. A longer line generated by ``hg help`` or
2426 Maximum width of help text. A longer line generated by ``hg help`` or
2411 ``hg subcommand --help`` will be broken after white space to get this
2427 ``hg subcommand --help`` will be broken after white space to get this
2412 width or the terminal width, whichever comes first.
2428 width or the terminal width, whichever comes first.
2413 A non-positive value will disable this and the terminal width will be
2429 A non-positive value will disable this and the terminal width will be
2414 used. (default: 78)
2430 used. (default: 78)
2415
2431
2416 ``timeout``
2432 ``timeout``
2417 The timeout used when a lock is held (in seconds), a negative value
2433 The timeout used when a lock is held (in seconds), a negative value
2418 means no timeout. (default: 600)
2434 means no timeout. (default: 600)
2419
2435
2420 ``timeout.warn``
2436 ``timeout.warn``
2421 Time (in seconds) before a warning is printed about held lock. A negative
2437 Time (in seconds) before a warning is printed about held lock. A negative
2422 value means no warning. (default: 0)
2438 value means no warning. (default: 0)
2423
2439
2424 ``traceback``
2440 ``traceback``
2425 Mercurial always prints a traceback when an unknown exception
2441 Mercurial always prints a traceback when an unknown exception
2426 occurs. Setting this to True will make Mercurial print a traceback
2442 occurs. Setting this to True will make Mercurial print a traceback
2427 on all exceptions, even those recognized by Mercurial (such as
2443 on all exceptions, even those recognized by Mercurial (such as
2428 IOError or MemoryError). (default: False)
2444 IOError or MemoryError). (default: False)
2429
2445
2430 ``tweakdefaults``
2446 ``tweakdefaults``
2431
2447
2432 By default Mercurial's behavior changes very little from release
2448 By default Mercurial's behavior changes very little from release
2433 to release, but over time the recommended config settings
2449 to release, but over time the recommended config settings
2434 shift. Enable this config to opt in to get automatic tweaks to
2450 shift. Enable this config to opt in to get automatic tweaks to
2435 Mercurial's behavior over time. This config setting will have no
2451 Mercurial's behavior over time. This config setting will have no
2436 effect if ``HGPLAIN`` is set or ``HGPLAINEXCEPT`` is set and does
2452 effect if ``HGPLAIN`` is set or ``HGPLAINEXCEPT`` is set and does
2437 not include ``tweakdefaults``. (default: False)
2453 not include ``tweakdefaults``. (default: False)
2438
2454
2439 It currently means::
2455 It currently means::
2440
2456
2441 .. tweakdefaultsmarker
2457 .. tweakdefaultsmarker
2442
2458
2443 ``username``
2459 ``username``
2444 The committer of a changeset created when running "commit".
2460 The committer of a changeset created when running "commit".
2445 Typically a person's name and email address, e.g. ``Fred Widget
2461 Typically a person's name and email address, e.g. ``Fred Widget
2446 <fred@example.com>``. Environment variables in the
2462 <fred@example.com>``. Environment variables in the
2447 username are expanded.
2463 username are expanded.
2448
2464
2449 (default: ``$EMAIL`` or ``username@hostname``. If the username in
2465 (default: ``$EMAIL`` or ``username@hostname``. If the username in
2450 hgrc is empty, e.g. if the system admin set ``username =`` in the
2466 hgrc is empty, e.g. if the system admin set ``username =`` in the
2451 system hgrc, it has to be specified manually or in a different
2467 system hgrc, it has to be specified manually or in a different
2452 hgrc file)
2468 hgrc file)
2453
2469
2454 ``verbose``
2470 ``verbose``
2455 Increase the amount of output printed. (default: False)
2471 Increase the amount of output printed. (default: False)
2456
2472
2457
2473
2458 ``web``
2474 ``web``
2459 -------
2475 -------
2460
2476
2461 Web interface configuration. The settings in this section apply to
2477 Web interface configuration. The settings in this section apply to
2462 both the builtin webserver (started by :hg:`serve`) and the script you
2478 both the builtin webserver (started by :hg:`serve`) and the script you
2463 run through a webserver (``hgweb.cgi`` and the derivatives for FastCGI
2479 run through a webserver (``hgweb.cgi`` and the derivatives for FastCGI
2464 and WSGI).
2480 and WSGI).
2465
2481
2466 The Mercurial webserver does no authentication (it does not prompt for
2482 The Mercurial webserver does no authentication (it does not prompt for
2467 usernames and passwords to validate *who* users are), but it does do
2483 usernames and passwords to validate *who* users are), but it does do
2468 authorization (it grants or denies access for *authenticated users*
2484 authorization (it grants or denies access for *authenticated users*
2469 based on settings in this section). You must either configure your
2485 based on settings in this section). You must either configure your
2470 webserver to do authentication for you, or disable the authorization
2486 webserver to do authentication for you, or disable the authorization
2471 checks.
2487 checks.
2472
2488
2473 For a quick setup in a trusted environment, e.g., a private LAN, where
2489 For a quick setup in a trusted environment, e.g., a private LAN, where
2474 you want it to accept pushes from anybody, you can use the following
2490 you want it to accept pushes from anybody, you can use the following
2475 command line::
2491 command line::
2476
2492
2477 $ hg --config web.allow-push=* --config web.push_ssl=False serve
2493 $ hg --config web.allow-push=* --config web.push_ssl=False serve
2478
2494
2479 Note that this will allow anybody to push anything to the server and
2495 Note that this will allow anybody to push anything to the server and
2480 that this should not be used for public servers.
2496 that this should not be used for public servers.
2481
2497
2482 The full set of options is:
2498 The full set of options is:
2483
2499
2484 ``accesslog``
2500 ``accesslog``
2485 Where to output the access log. (default: stdout)
2501 Where to output the access log. (default: stdout)
2486
2502
2487 ``address``
2503 ``address``
2488 Interface address to bind to. (default: all)
2504 Interface address to bind to. (default: all)
2489
2505
2490 ``allow-archive``
2506 ``allow-archive``
2491 List of archive format (bz2, gz, zip) allowed for downloading.
2507 List of archive format (bz2, gz, zip) allowed for downloading.
2492 (default: empty)
2508 (default: empty)
2493
2509
2494 ``allowbz2``
2510 ``allowbz2``
2495 (DEPRECATED) Whether to allow .tar.bz2 downloading of repository
2511 (DEPRECATED) Whether to allow .tar.bz2 downloading of repository
2496 revisions.
2512 revisions.
2497 (default: False)
2513 (default: False)
2498
2514
2499 ``allowgz``
2515 ``allowgz``
2500 (DEPRECATED) Whether to allow .tar.gz downloading of repository
2516 (DEPRECATED) Whether to allow .tar.gz downloading of repository
2501 revisions.
2517 revisions.
2502 (default: False)
2518 (default: False)
2503
2519
2504 ``allow-pull``
2520 ``allow-pull``
2505 Whether to allow pulling from the repository. (default: True)
2521 Whether to allow pulling from the repository. (default: True)
2506
2522
2507 ``allow-push``
2523 ``allow-push``
2508 Whether to allow pushing to the repository. If empty or not set,
2524 Whether to allow pushing to the repository. If empty or not set,
2509 pushing is not allowed. If the special value ``*``, any remote
2525 pushing is not allowed. If the special value ``*``, any remote
2510 user can push, including unauthenticated users. Otherwise, the
2526 user can push, including unauthenticated users. Otherwise, the
2511 remote user must have been authenticated, and the authenticated
2527 remote user must have been authenticated, and the authenticated
2512 user name must be present in this list. The contents of the
2528 user name must be present in this list. The contents of the
2513 allow-push list are examined after the deny_push list.
2529 allow-push list are examined after the deny_push list.
2514
2530
2515 ``allow_read``
2531 ``allow_read``
2516 If the user has not already been denied repository access due to
2532 If the user has not already been denied repository access due to
2517 the contents of deny_read, this list determines whether to grant
2533 the contents of deny_read, this list determines whether to grant
2518 repository access to the user. If this list is not empty, and the
2534 repository access to the user. If this list is not empty, and the
2519 user is unauthenticated or not present in the list, then access is
2535 user is unauthenticated or not present in the list, then access is
2520 denied for the user. If the list is empty or not set, then access
2536 denied for the user. If the list is empty or not set, then access
2521 is permitted to all users by default. Setting allow_read to the
2537 is permitted to all users by default. Setting allow_read to the
2522 special value ``*`` is equivalent to it not being set (i.e. access
2538 special value ``*`` is equivalent to it not being set (i.e. access
2523 is permitted to all users). The contents of the allow_read list are
2539 is permitted to all users). The contents of the allow_read list are
2524 examined after the deny_read list.
2540 examined after the deny_read list.
2525
2541
2526 ``allowzip``
2542 ``allowzip``
2527 (DEPRECATED) Whether to allow .zip downloading of repository
2543 (DEPRECATED) Whether to allow .zip downloading of repository
2528 revisions. This feature creates temporary files.
2544 revisions. This feature creates temporary files.
2529 (default: False)
2545 (default: False)
2530
2546
2531 ``archivesubrepos``
2547 ``archivesubrepos``
2532 Whether to recurse into subrepositories when archiving.
2548 Whether to recurse into subrepositories when archiving.
2533 (default: False)
2549 (default: False)
2534
2550
2535 ``baseurl``
2551 ``baseurl``
2536 Base URL to use when publishing URLs in other locations, so
2552 Base URL to use when publishing URLs in other locations, so
2537 third-party tools like email notification hooks can construct
2553 third-party tools like email notification hooks can construct
2538 URLs. Example: ``http://hgserver/repos/``.
2554 URLs. Example: ``http://hgserver/repos/``.
2539
2555
2540 ``cacerts``
2556 ``cacerts``
2541 Path to file containing a list of PEM encoded certificate
2557 Path to file containing a list of PEM encoded certificate
2542 authority certificates. Environment variables and ``~user``
2558 authority certificates. Environment variables and ``~user``
2543 constructs are expanded in the filename. If specified on the
2559 constructs are expanded in the filename. If specified on the
2544 client, then it will verify the identity of remote HTTPS servers
2560 client, then it will verify the identity of remote HTTPS servers
2545 with these certificates.
2561 with these certificates.
2546
2562
2547 To disable SSL verification temporarily, specify ``--insecure`` from
2563 To disable SSL verification temporarily, specify ``--insecure`` from
2548 command line.
2564 command line.
2549
2565
2550 You can use OpenSSL's CA certificate file if your platform has
2566 You can use OpenSSL's CA certificate file if your platform has
2551 one. On most Linux systems this will be
2567 one. On most Linux systems this will be
2552 ``/etc/ssl/certs/ca-certificates.crt``. Otherwise you will have to
2568 ``/etc/ssl/certs/ca-certificates.crt``. Otherwise you will have to
2553 generate this file manually. The form must be as follows::
2569 generate this file manually. The form must be as follows::
2554
2570
2555 -----BEGIN CERTIFICATE-----
2571 -----BEGIN CERTIFICATE-----
2556 ... (certificate in base64 PEM encoding) ...
2572 ... (certificate in base64 PEM encoding) ...
2557 -----END CERTIFICATE-----
2573 -----END CERTIFICATE-----
2558 -----BEGIN CERTIFICATE-----
2574 -----BEGIN CERTIFICATE-----
2559 ... (certificate in base64 PEM encoding) ...
2575 ... (certificate in base64 PEM encoding) ...
2560 -----END CERTIFICATE-----
2576 -----END CERTIFICATE-----
2561
2577
2562 ``cache``
2578 ``cache``
2563 Whether to support caching in hgweb. (default: True)
2579 Whether to support caching in hgweb. (default: True)
2564
2580
2565 ``certificate``
2581 ``certificate``
2566 Certificate to use when running :hg:`serve`.
2582 Certificate to use when running :hg:`serve`.
2567
2583
2568 ``collapse``
2584 ``collapse``
2569 With ``descend`` enabled, repositories in subdirectories are shown at
2585 With ``descend`` enabled, repositories in subdirectories are shown at
2570 a single level alongside repositories in the current path. With
2586 a single level alongside repositories in the current path. With
2571 ``collapse`` also enabled, repositories residing at a deeper level than
2587 ``collapse`` also enabled, repositories residing at a deeper level than
2572 the current path are grouped behind navigable directory entries that
2588 the current path are grouped behind navigable directory entries that
2573 lead to the locations of these repositories. In effect, this setting
2589 lead to the locations of these repositories. In effect, this setting
2574 collapses each collection of repositories found within a subdirectory
2590 collapses each collection of repositories found within a subdirectory
2575 into a single entry for that subdirectory. (default: False)
2591 into a single entry for that subdirectory. (default: False)
2576
2592
2577 ``comparisoncontext``
2593 ``comparisoncontext``
2578 Number of lines of context to show in side-by-side file comparison. If
2594 Number of lines of context to show in side-by-side file comparison. If
2579 negative or the value ``full``, whole files are shown. (default: 5)
2595 negative or the value ``full``, whole files are shown. (default: 5)
2580
2596
2581 This setting can be overridden by a ``context`` request parameter to the
2597 This setting can be overridden by a ``context`` request parameter to the
2582 ``comparison`` command, taking the same values.
2598 ``comparison`` command, taking the same values.
2583
2599
2584 ``contact``
2600 ``contact``
2585 Name or email address of the person in charge of the repository.
2601 Name or email address of the person in charge of the repository.
2586 (default: ui.username or ``$EMAIL`` or "unknown" if unset or empty)
2602 (default: ui.username or ``$EMAIL`` or "unknown" if unset or empty)
2587
2603
2588 ``csp``
2604 ``csp``
2589 Send a ``Content-Security-Policy`` HTTP header with this value.
2605 Send a ``Content-Security-Policy`` HTTP header with this value.
2590
2606
2591 The value may contain a special string ``%nonce%``, which will be replaced
2607 The value may contain a special string ``%nonce%``, which will be replaced
2592 by a randomly-generated one-time use value. If the value contains
2608 by a randomly-generated one-time use value. If the value contains
2593 ``%nonce%``, ``web.cache`` will be disabled, as caching undermines the
2609 ``%nonce%``, ``web.cache`` will be disabled, as caching undermines the
2594 one-time property of the nonce. This nonce will also be inserted into
2610 one-time property of the nonce. This nonce will also be inserted into
2595 ``<script>`` elements containing inline JavaScript.
2611 ``<script>`` elements containing inline JavaScript.
2596
2612
2597 Note: lots of HTML content sent by the server is derived from repository
2613 Note: lots of HTML content sent by the server is derived from repository
2598 data. Please consider the potential for malicious repository data to
2614 data. Please consider the potential for malicious repository data to
2599 "inject" itself into generated HTML content as part of your security
2615 "inject" itself into generated HTML content as part of your security
2600 threat model.
2616 threat model.
2601
2617
2602 ``deny_push``
2618 ``deny_push``
2603 Whether to deny pushing to the repository. If empty or not set,
2619 Whether to deny pushing to the repository. If empty or not set,
2604 push is not denied. If the special value ``*``, all remote users are
2620 push is not denied. If the special value ``*``, all remote users are
2605 denied push. Otherwise, unauthenticated users are all denied, and
2621 denied push. Otherwise, unauthenticated users are all denied, and
2606 any authenticated user name present in this list is also denied. The
2622 any authenticated user name present in this list is also denied. The
2607 contents of the deny_push list are examined before the allow-push list.
2623 contents of the deny_push list are examined before the allow-push list.
2608
2624
2609 ``deny_read``
2625 ``deny_read``
2610 Whether to deny reading/viewing of the repository. If this list is
2626 Whether to deny reading/viewing of the repository. If this list is
2611 not empty, unauthenticated users are all denied, and any
2627 not empty, unauthenticated users are all denied, and any
2612 authenticated user name present in this list is also denied access to
2628 authenticated user name present in this list is also denied access to
2613 the repository. If set to the special value ``*``, all remote users
2629 the repository. If set to the special value ``*``, all remote users
2614 are denied access (rarely needed ;). If deny_read is empty or not set,
2630 are denied access (rarely needed ;). If deny_read is empty or not set,
2615 the determination of repository access depends on the presence and
2631 the determination of repository access depends on the presence and
2616 content of the allow_read list (see description). If both
2632 content of the allow_read list (see description). If both
2617 deny_read and allow_read are empty or not set, then access is
2633 deny_read and allow_read are empty or not set, then access is
2618 permitted to all users by default. If the repository is being
2634 permitted to all users by default. If the repository is being
2619 served via hgwebdir, denied users will not be able to see it in
2635 served via hgwebdir, denied users will not be able to see it in
2620 the list of repositories. The contents of the deny_read list have
2636 the list of repositories. The contents of the deny_read list have
2621 priority over (are examined before) the contents of the allow_read
2637 priority over (are examined before) the contents of the allow_read
2622 list.
2638 list.
2623
2639
2624 ``descend``
2640 ``descend``
2625 hgwebdir indexes will not descend into subdirectories. Only repositories
2641 hgwebdir indexes will not descend into subdirectories. Only repositories
2626 directly in the current path will be shown (other repositories are still
2642 directly in the current path will be shown (other repositories are still
2627 available from the index corresponding to their containing path).
2643 available from the index corresponding to their containing path).
2628
2644
2629 ``description``
2645 ``description``
2630 Textual description of the repository's purpose or contents.
2646 Textual description of the repository's purpose or contents.
2631 (default: "unknown")
2647 (default: "unknown")
2632
2648
2633 ``encoding``
2649 ``encoding``
2634 Character encoding name. (default: the current locale charset)
2650 Character encoding name. (default: the current locale charset)
2635 Example: "UTF-8".
2651 Example: "UTF-8".
2636
2652
2637 ``errorlog``
2653 ``errorlog``
2638 Where to output the error log. (default: stderr)
2654 Where to output the error log. (default: stderr)
2639
2655
2640 ``guessmime``
2656 ``guessmime``
2641 Control MIME types for raw download of file content.
2657 Control MIME types for raw download of file content.
2642 Set to True to let hgweb guess the content type from the file
2658 Set to True to let hgweb guess the content type from the file
2643 extension. This will serve HTML files as ``text/html`` and might
2659 extension. This will serve HTML files as ``text/html`` and might
2644 allow cross-site scripting attacks when serving untrusted
2660 allow cross-site scripting attacks when serving untrusted
2645 repositories. (default: False)
2661 repositories. (default: False)
2646
2662
2647 ``hidden``
2663 ``hidden``
2648 Whether to hide the repository in the hgwebdir index.
2664 Whether to hide the repository in the hgwebdir index.
2649 (default: False)
2665 (default: False)
2650
2666
2651 ``ipv6``
2667 ``ipv6``
2652 Whether to use IPv6. (default: False)
2668 Whether to use IPv6. (default: False)
2653
2669
2654 ``labels``
2670 ``labels``
2655 List of string *labels* associated with the repository.
2671 List of string *labels* associated with the repository.
2656
2672
2657 Labels are exposed as a template keyword and can be used to customize
2673 Labels are exposed as a template keyword and can be used to customize
2658 output. e.g. the ``index`` template can group or filter repositories
2674 output. e.g. the ``index`` template can group or filter repositories
2659 by labels and the ``summary`` template can display additional content
2675 by labels and the ``summary`` template can display additional content
2660 if a specific label is present.
2676 if a specific label is present.
2661
2677
2662 ``logoimg``
2678 ``logoimg``
2663 File name of the logo image that some templates display on each page.
2679 File name of the logo image that some templates display on each page.
2664 The file name is relative to ``staticurl``. That is, the full path to
2680 The file name is relative to ``staticurl``. That is, the full path to
2665 the logo image is "staticurl/logoimg".
2681 the logo image is "staticurl/logoimg".
2666 If unset, ``hglogo.png`` will be used.
2682 If unset, ``hglogo.png`` will be used.
2667
2683
2668 ``logourl``
2684 ``logourl``
2669 Base URL to use for logos. If unset, ``https://mercurial-scm.org/``
2685 Base URL to use for logos. If unset, ``https://mercurial-scm.org/``
2670 will be used.
2686 will be used.
2671
2687
2672 ``maxchanges``
2688 ``maxchanges``
2673 Maximum number of changes to list on the changelog. (default: 10)
2689 Maximum number of changes to list on the changelog. (default: 10)
2674
2690
2675 ``maxfiles``
2691 ``maxfiles``
2676 Maximum number of files to list per changeset. (default: 10)
2692 Maximum number of files to list per changeset. (default: 10)
2677
2693
2678 ``maxshortchanges``
2694 ``maxshortchanges``
2679 Maximum number of changes to list on the shortlog, graph or filelog
2695 Maximum number of changes to list on the shortlog, graph or filelog
2680 pages. (default: 60)
2696 pages. (default: 60)
2681
2697
2682 ``name``
2698 ``name``
2683 Repository name to use in the web interface.
2699 Repository name to use in the web interface.
2684 (default: current working directory)
2700 (default: current working directory)
2685
2701
2686 ``port``
2702 ``port``
2687 Port to listen on. (default: 8000)
2703 Port to listen on. (default: 8000)
2688
2704
2689 ``prefix``
2705 ``prefix``
2690 Prefix path to serve from. (default: '' (server root))
2706 Prefix path to serve from. (default: '' (server root))
2691
2707
2692 ``push_ssl``
2708 ``push_ssl``
2693 Whether to require that inbound pushes be transported over SSL to
2709 Whether to require that inbound pushes be transported over SSL to
2694 prevent password sniffing. (default: True)
2710 prevent password sniffing. (default: True)
2695
2711
2696 ``refreshinterval``
2712 ``refreshinterval``
2697 How frequently directory listings re-scan the filesystem for new
2713 How frequently directory listings re-scan the filesystem for new
2698 repositories, in seconds. This is relevant when wildcards are used
2714 repositories, in seconds. This is relevant when wildcards are used
2699 to define paths. Depending on how much filesystem traversal is
2715 to define paths. Depending on how much filesystem traversal is
2700 required, refreshing may negatively impact performance.
2716 required, refreshing may negatively impact performance.
2701
2717
2702 Values less than or equal to 0 always refresh.
2718 Values less than or equal to 0 always refresh.
2703 (default: 20)
2719 (default: 20)
2704
2720
2705 ``server-header``
2721 ``server-header``
2706 Value for HTTP ``Server`` response header.
2722 Value for HTTP ``Server`` response header.
2707
2723
2708 ``static``
2724 ``static``
2709 Directory where static files are served from.
2725 Directory where static files are served from.
2710
2726
2711 ``staticurl``
2727 ``staticurl``
2712 Base URL to use for static files. If unset, static files (e.g. the
2728 Base URL to use for static files. If unset, static files (e.g. the
2713 hgicon.png favicon) will be served by the CGI script itself. Use
2729 hgicon.png favicon) will be served by the CGI script itself. Use
2714 this setting to serve them directly with the HTTP server.
2730 this setting to serve them directly with the HTTP server.
2715 Example: ``http://hgserver/static/``.
2731 Example: ``http://hgserver/static/``.
2716
2732
2717 ``stripes``
2733 ``stripes``
2718 How many lines a "zebra stripe" should span in multi-line output.
2734 How many lines a "zebra stripe" should span in multi-line output.
2719 Set to 0 to disable. (default: 1)
2735 Set to 0 to disable. (default: 1)
2720
2736
2721 ``style``
2737 ``style``
2722 Which template map style to use. The available options are the names of
2738 Which template map style to use. The available options are the names of
2723 subdirectories in the HTML templates path. (default: ``paper``)
2739 subdirectories in the HTML templates path. (default: ``paper``)
2724 Example: ``monoblue``.
2740 Example: ``monoblue``.
2725
2741
2726 ``templates``
2742 ``templates``
2727 Where to find the HTML templates. The default path to the HTML templates
2743 Where to find the HTML templates. The default path to the HTML templates
2728 can be obtained from ``hg debuginstall``.
2744 can be obtained from ``hg debuginstall``.
2729
2745
2730 ``websub``
2746 ``websub``
2731 ----------
2747 ----------
2732
2748
2733 Web substitution filter definition. You can use this section to
2749 Web substitution filter definition. You can use this section to
2734 define a set of regular expression substitution patterns which
2750 define a set of regular expression substitution patterns which
2735 let you automatically modify the hgweb server output.
2751 let you automatically modify the hgweb server output.
2736
2752
2737 The default hgweb templates only apply these substitution patterns
2753 The default hgweb templates only apply these substitution patterns
2738 on the revision description fields. You can apply them anywhere
2754 on the revision description fields. You can apply them anywhere
2739 you want when you create your own templates by adding calls to the
2755 you want when you create your own templates by adding calls to the
2740 "websub" filter (usually after calling the "escape" filter).
2756 "websub" filter (usually after calling the "escape" filter).
2741
2757
2742 This can be used, for example, to convert issue references to links
2758 This can be used, for example, to convert issue references to links
2743 to your issue tracker, or to convert "markdown-like" syntax into
2759 to your issue tracker, or to convert "markdown-like" syntax into
2744 HTML (see the examples below).
2760 HTML (see the examples below).
2745
2761
2746 Each entry in this section names a substitution filter.
2762 Each entry in this section names a substitution filter.
2747 The value of each entry defines the substitution expression itself.
2763 The value of each entry defines the substitution expression itself.
2748 The websub expressions follow the old interhg extension syntax,
2764 The websub expressions follow the old interhg extension syntax,
2749 which in turn imitates the Unix sed replacement syntax::
2765 which in turn imitates the Unix sed replacement syntax::
2750
2766
2751 patternname = s/SEARCH_REGEX/REPLACE_EXPRESSION/[i]
2767 patternname = s/SEARCH_REGEX/REPLACE_EXPRESSION/[i]
2752
2768
2753 You can use any separator other than "/". The final "i" is optional
2769 You can use any separator other than "/". The final "i" is optional
2754 and indicates that the search must be case insensitive.
2770 and indicates that the search must be case insensitive.
2755
2771
2756 Examples::
2772 Examples::
2757
2773
2758 [websub]
2774 [websub]
2759 issues = s|issue(\d+)|<a href="http://bts.example.org/issue\1">issue\1</a>|i
2775 issues = s|issue(\d+)|<a href="http://bts.example.org/issue\1">issue\1</a>|i
2760 italic = s/\b_(\S+)_\b/<i>\1<\/i>/
2776 italic = s/\b_(\S+)_\b/<i>\1<\/i>/
2761 bold = s/\*\b(\S+)\b\*/<b>\1<\/b>/
2777 bold = s/\*\b(\S+)\b\*/<b>\1<\/b>/
2762
2778
2763 ``worker``
2779 ``worker``
2764 ----------
2780 ----------
2765
2781
2766 Parallel master/worker configuration. We currently perform working
2782 Parallel master/worker configuration. We currently perform working
2767 directory updates in parallel on Unix-like systems, which greatly
2783 directory updates in parallel on Unix-like systems, which greatly
2768 helps performance.
2784 helps performance.
2769
2785
2770 ``enabled``
2786 ``enabled``
2771 Whether to enable workers code to be used.
2787 Whether to enable workers code to be used.
2772 (default: true)
2788 (default: true)
2773
2789
2774 ``numcpus``
2790 ``numcpus``
2775 Number of CPUs to use for parallel operations. A zero or
2791 Number of CPUs to use for parallel operations. A zero or
2776 negative value is treated as ``use the default``.
2792 negative value is treated as ``use the default``.
2777 (default: 4 or the number of CPUs on the system, whichever is larger)
2793 (default: 4 or the number of CPUs on the system, whichever is larger)
2778
2794
2779 ``backgroundclose``
2795 ``backgroundclose``
2780 Whether to enable closing file handles on background threads during certain
2796 Whether to enable closing file handles on background threads during certain
2781 operations. Some platforms aren't very efficient at closing file
2797 operations. Some platforms aren't very efficient at closing file
2782 handles that have been written or appended to. By performing file closing
2798 handles that have been written or appended to. By performing file closing
2783 on background threads, file write rate can increase substantially.
2799 on background threads, file write rate can increase substantially.
2784 (default: true on Windows, false elsewhere)
2800 (default: true on Windows, false elsewhere)
2785
2801
2786 ``backgroundcloseminfilecount``
2802 ``backgroundcloseminfilecount``
2787 Minimum number of files required to trigger background file closing.
2803 Minimum number of files required to trigger background file closing.
2788 Operations not writing this many files won't start background close
2804 Operations not writing this many files won't start background close
2789 threads.
2805 threads.
2790 (default: 2048)
2806 (default: 2048)
2791
2807
2792 ``backgroundclosemaxqueue``
2808 ``backgroundclosemaxqueue``
2793 The maximum number of opened file handles waiting to be closed in the
2809 The maximum number of opened file handles waiting to be closed in the
2794 background. This option only has an effect if ``backgroundclose`` is
2810 background. This option only has an effect if ``backgroundclose`` is
2795 enabled.
2811 enabled.
2796 (default: 384)
2812 (default: 384)
2797
2813
2798 ``backgroundclosethreadcount``
2814 ``backgroundclosethreadcount``
2799 Number of threads to process background file closes. Only relevant if
2815 Number of threads to process background file closes. Only relevant if
2800 ``backgroundclose`` is enabled.
2816 ``backgroundclose`` is enabled.
2801 (default: 4)
2817 (default: 4)
@@ -1,3079 +1,3083 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 )
25 )
26 from . import (
26 from . import (
27 bookmarks,
27 bookmarks,
28 branchmap,
28 branchmap,
29 bundle2,
29 bundle2,
30 changegroup,
30 changegroup,
31 changelog,
31 changelog,
32 color,
32 color,
33 context,
33 context,
34 dirstate,
34 dirstate,
35 dirstateguard,
35 dirstateguard,
36 discovery,
36 discovery,
37 encoding,
37 encoding,
38 error,
38 error,
39 exchange,
39 exchange,
40 extensions,
40 extensions,
41 filelog,
41 filelog,
42 hook,
42 hook,
43 lock as lockmod,
43 lock as lockmod,
44 manifest,
44 manifest,
45 match as matchmod,
45 match as matchmod,
46 merge as mergemod,
46 merge as mergemod,
47 mergeutil,
47 mergeutil,
48 namespaces,
48 namespaces,
49 narrowspec,
49 narrowspec,
50 obsolete,
50 obsolete,
51 pathutil,
51 pathutil,
52 phases,
52 phases,
53 pushkey,
53 pushkey,
54 pycompat,
54 pycompat,
55 repository,
55 repository,
56 repoview,
56 repoview,
57 revset,
57 revset,
58 revsetlang,
58 revsetlang,
59 scmutil,
59 scmutil,
60 sparse,
60 sparse,
61 store as storemod,
61 store as storemod,
62 subrepoutil,
62 subrepoutil,
63 tags as tagsmod,
63 tags as tagsmod,
64 transaction,
64 transaction,
65 txnutil,
65 txnutil,
66 util,
66 util,
67 vfs as vfsmod,
67 vfs as vfsmod,
68 )
68 )
69 from .utils import (
69 from .utils import (
70 interfaceutil,
70 interfaceutil,
71 procutil,
71 procutil,
72 stringutil,
72 stringutil,
73 )
73 )
74
74
75 from .revlogutils import (
75 from .revlogutils import (
76 constants as revlogconst,
76 constants as revlogconst,
77 )
77 )
78
78
79 release = lockmod.release
79 release = lockmod.release
80 urlerr = util.urlerr
80 urlerr = util.urlerr
81 urlreq = util.urlreq
81 urlreq = util.urlreq
82
82
83 # set of (path, vfs-location) tuples. vfs-location is:
83 # set of (path, vfs-location) tuples. vfs-location is:
84 # - 'plain for vfs relative paths
84 # - 'plain for vfs relative paths
85 # - '' for svfs relative paths
85 # - '' for svfs relative paths
86 _cachedfiles = set()
86 _cachedfiles = set()
87
87
88 class _basefilecache(scmutil.filecache):
88 class _basefilecache(scmutil.filecache):
89 """All filecache usage on repo are done for logic that should be unfiltered
89 """All filecache usage on repo are done for logic that should be unfiltered
90 """
90 """
91 def __get__(self, repo, type=None):
91 def __get__(self, repo, type=None):
92 if repo is None:
92 if repo is None:
93 return self
93 return self
94 # proxy to unfiltered __dict__ since filtered repo has no entry
94 # proxy to unfiltered __dict__ since filtered repo has no entry
95 unfi = repo.unfiltered()
95 unfi = repo.unfiltered()
96 try:
96 try:
97 return unfi.__dict__[self.sname]
97 return unfi.__dict__[self.sname]
98 except KeyError:
98 except KeyError:
99 pass
99 pass
100 return super(_basefilecache, self).__get__(unfi, type)
100 return super(_basefilecache, self).__get__(unfi, type)
101
101
102 def set(self, repo, value):
102 def set(self, repo, value):
103 return super(_basefilecache, self).set(repo.unfiltered(), value)
103 return super(_basefilecache, self).set(repo.unfiltered(), value)
104
104
105 class repofilecache(_basefilecache):
105 class repofilecache(_basefilecache):
106 """filecache for files in .hg but outside of .hg/store"""
106 """filecache for files in .hg but outside of .hg/store"""
107 def __init__(self, *paths):
107 def __init__(self, *paths):
108 super(repofilecache, self).__init__(*paths)
108 super(repofilecache, self).__init__(*paths)
109 for path in paths:
109 for path in paths:
110 _cachedfiles.add((path, 'plain'))
110 _cachedfiles.add((path, 'plain'))
111
111
112 def join(self, obj, fname):
112 def join(self, obj, fname):
113 return obj.vfs.join(fname)
113 return obj.vfs.join(fname)
114
114
115 class storecache(_basefilecache):
115 class storecache(_basefilecache):
116 """filecache for files in the store"""
116 """filecache for files in the store"""
117 def __init__(self, *paths):
117 def __init__(self, *paths):
118 super(storecache, self).__init__(*paths)
118 super(storecache, self).__init__(*paths)
119 for path in paths:
119 for path in paths:
120 _cachedfiles.add((path, ''))
120 _cachedfiles.add((path, ''))
121
121
122 def join(self, obj, fname):
122 def join(self, obj, fname):
123 return obj.sjoin(fname)
123 return obj.sjoin(fname)
124
124
125 def isfilecached(repo, name):
125 def isfilecached(repo, name):
126 """check if a repo has already cached "name" filecache-ed property
126 """check if a repo has already cached "name" filecache-ed property
127
127
128 This returns (cachedobj-or-None, iscached) tuple.
128 This returns (cachedobj-or-None, iscached) tuple.
129 """
129 """
130 cacheentry = repo.unfiltered()._filecache.get(name, None)
130 cacheentry = repo.unfiltered()._filecache.get(name, None)
131 if not cacheentry:
131 if not cacheentry:
132 return None, False
132 return None, False
133 return cacheentry.obj, True
133 return cacheentry.obj, True
134
134
135 class unfilteredpropertycache(util.propertycache):
135 class unfilteredpropertycache(util.propertycache):
136 """propertycache that apply to unfiltered repo only"""
136 """propertycache that apply to unfiltered repo only"""
137
137
138 def __get__(self, repo, type=None):
138 def __get__(self, repo, type=None):
139 unfi = repo.unfiltered()
139 unfi = repo.unfiltered()
140 if unfi is repo:
140 if unfi is repo:
141 return super(unfilteredpropertycache, self).__get__(unfi)
141 return super(unfilteredpropertycache, self).__get__(unfi)
142 return getattr(unfi, self.name)
142 return getattr(unfi, self.name)
143
143
144 class filteredpropertycache(util.propertycache):
144 class filteredpropertycache(util.propertycache):
145 """propertycache that must take filtering in account"""
145 """propertycache that must take filtering in account"""
146
146
147 def cachevalue(self, obj, value):
147 def cachevalue(self, obj, value):
148 object.__setattr__(obj, self.name, value)
148 object.__setattr__(obj, self.name, value)
149
149
150
150
151 def hasunfilteredcache(repo, name):
151 def hasunfilteredcache(repo, name):
152 """check if a repo has an unfilteredpropertycache value for <name>"""
152 """check if a repo has an unfilteredpropertycache value for <name>"""
153 return name in vars(repo.unfiltered())
153 return name in vars(repo.unfiltered())
154
154
155 def unfilteredmethod(orig):
155 def unfilteredmethod(orig):
156 """decorate method that always need to be run on unfiltered version"""
156 """decorate method that always need to be run on unfiltered version"""
157 def wrapper(repo, *args, **kwargs):
157 def wrapper(repo, *args, **kwargs):
158 return orig(repo.unfiltered(), *args, **kwargs)
158 return orig(repo.unfiltered(), *args, **kwargs)
159 return wrapper
159 return wrapper
160
160
161 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
161 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
162 'unbundle'}
162 'unbundle'}
163 legacycaps = moderncaps.union({'changegroupsubset'})
163 legacycaps = moderncaps.union({'changegroupsubset'})
164
164
165 @interfaceutil.implementer(repository.ipeercommandexecutor)
165 @interfaceutil.implementer(repository.ipeercommandexecutor)
166 class localcommandexecutor(object):
166 class localcommandexecutor(object):
167 def __init__(self, peer):
167 def __init__(self, peer):
168 self._peer = peer
168 self._peer = peer
169 self._sent = False
169 self._sent = False
170 self._closed = False
170 self._closed = False
171
171
172 def __enter__(self):
172 def __enter__(self):
173 return self
173 return self
174
174
175 def __exit__(self, exctype, excvalue, exctb):
175 def __exit__(self, exctype, excvalue, exctb):
176 self.close()
176 self.close()
177
177
178 def callcommand(self, command, args):
178 def callcommand(self, command, args):
179 if self._sent:
179 if self._sent:
180 raise error.ProgrammingError('callcommand() cannot be used after '
180 raise error.ProgrammingError('callcommand() cannot be used after '
181 'sendcommands()')
181 'sendcommands()')
182
182
183 if self._closed:
183 if self._closed:
184 raise error.ProgrammingError('callcommand() cannot be used after '
184 raise error.ProgrammingError('callcommand() cannot be used after '
185 'close()')
185 'close()')
186
186
187 # We don't need to support anything fancy. Just call the named
187 # We don't need to support anything fancy. Just call the named
188 # method on the peer and return a resolved future.
188 # method on the peer and return a resolved future.
189 fn = getattr(self._peer, pycompat.sysstr(command))
189 fn = getattr(self._peer, pycompat.sysstr(command))
190
190
191 f = pycompat.futures.Future()
191 f = pycompat.futures.Future()
192
192
193 try:
193 try:
194 result = fn(**pycompat.strkwargs(args))
194 result = fn(**pycompat.strkwargs(args))
195 except Exception:
195 except Exception:
196 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
196 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
197 else:
197 else:
198 f.set_result(result)
198 f.set_result(result)
199
199
200 return f
200 return f
201
201
202 def sendcommands(self):
202 def sendcommands(self):
203 self._sent = True
203 self._sent = True
204
204
205 def close(self):
205 def close(self):
206 self._closed = True
206 self._closed = True
207
207
208 @interfaceutil.implementer(repository.ipeercommands)
208 @interfaceutil.implementer(repository.ipeercommands)
209 class localpeer(repository.peer):
209 class localpeer(repository.peer):
210 '''peer for a local repo; reflects only the most recent API'''
210 '''peer for a local repo; reflects only the most recent API'''
211
211
212 def __init__(self, repo, caps=None):
212 def __init__(self, repo, caps=None):
213 super(localpeer, self).__init__()
213 super(localpeer, self).__init__()
214
214
215 if caps is None:
215 if caps is None:
216 caps = moderncaps.copy()
216 caps = moderncaps.copy()
217 self._repo = repo.filtered('served')
217 self._repo = repo.filtered('served')
218 self.ui = repo.ui
218 self.ui = repo.ui
219 self._caps = repo._restrictcapabilities(caps)
219 self._caps = repo._restrictcapabilities(caps)
220
220
221 # Begin of _basepeer interface.
221 # Begin of _basepeer interface.
222
222
223 def url(self):
223 def url(self):
224 return self._repo.url()
224 return self._repo.url()
225
225
226 def local(self):
226 def local(self):
227 return self._repo
227 return self._repo
228
228
229 def peer(self):
229 def peer(self):
230 return self
230 return self
231
231
232 def canpush(self):
232 def canpush(self):
233 return True
233 return True
234
234
235 def close(self):
235 def close(self):
236 self._repo.close()
236 self._repo.close()
237
237
238 # End of _basepeer interface.
238 # End of _basepeer interface.
239
239
240 # Begin of _basewirecommands interface.
240 # Begin of _basewirecommands interface.
241
241
242 def branchmap(self):
242 def branchmap(self):
243 return self._repo.branchmap()
243 return self._repo.branchmap()
244
244
245 def capabilities(self):
245 def capabilities(self):
246 return self._caps
246 return self._caps
247
247
248 def clonebundles(self):
248 def clonebundles(self):
249 return self._repo.tryread('clonebundles.manifest')
249 return self._repo.tryread('clonebundles.manifest')
250
250
251 def debugwireargs(self, one, two, three=None, four=None, five=None):
251 def debugwireargs(self, one, two, three=None, four=None, five=None):
252 """Used to test argument passing over the wire"""
252 """Used to test argument passing over the wire"""
253 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
253 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
254 pycompat.bytestr(four),
254 pycompat.bytestr(four),
255 pycompat.bytestr(five))
255 pycompat.bytestr(five))
256
256
257 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
257 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
258 **kwargs):
258 **kwargs):
259 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
259 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
260 common=common, bundlecaps=bundlecaps,
260 common=common, bundlecaps=bundlecaps,
261 **kwargs)[1]
261 **kwargs)[1]
262 cb = util.chunkbuffer(chunks)
262 cb = util.chunkbuffer(chunks)
263
263
264 if exchange.bundle2requested(bundlecaps):
264 if exchange.bundle2requested(bundlecaps):
265 # When requesting a bundle2, getbundle returns a stream to make the
265 # When requesting a bundle2, getbundle returns a stream to make the
266 # wire level function happier. We need to build a proper object
266 # wire level function happier. We need to build a proper object
267 # from it in local peer.
267 # from it in local peer.
268 return bundle2.getunbundler(self.ui, cb)
268 return bundle2.getunbundler(self.ui, cb)
269 else:
269 else:
270 return changegroup.getunbundler('01', cb, None)
270 return changegroup.getunbundler('01', cb, None)
271
271
272 def heads(self):
272 def heads(self):
273 return self._repo.heads()
273 return self._repo.heads()
274
274
275 def known(self, nodes):
275 def known(self, nodes):
276 return self._repo.known(nodes)
276 return self._repo.known(nodes)
277
277
278 def listkeys(self, namespace):
278 def listkeys(self, namespace):
279 return self._repo.listkeys(namespace)
279 return self._repo.listkeys(namespace)
280
280
281 def lookup(self, key):
281 def lookup(self, key):
282 return self._repo.lookup(key)
282 return self._repo.lookup(key)
283
283
284 def pushkey(self, namespace, key, old, new):
284 def pushkey(self, namespace, key, old, new):
285 return self._repo.pushkey(namespace, key, old, new)
285 return self._repo.pushkey(namespace, key, old, new)
286
286
287 def stream_out(self):
287 def stream_out(self):
288 raise error.Abort(_('cannot perform stream clone against local '
288 raise error.Abort(_('cannot perform stream clone against local '
289 'peer'))
289 'peer'))
290
290
291 def unbundle(self, bundle, heads, url):
291 def unbundle(self, bundle, heads, url):
292 """apply a bundle on a repo
292 """apply a bundle on a repo
293
293
294 This function handles the repo locking itself."""
294 This function handles the repo locking itself."""
295 try:
295 try:
296 try:
296 try:
297 bundle = exchange.readbundle(self.ui, bundle, None)
297 bundle = exchange.readbundle(self.ui, bundle, None)
298 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
298 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
299 if util.safehasattr(ret, 'getchunks'):
299 if util.safehasattr(ret, 'getchunks'):
300 # This is a bundle20 object, turn it into an unbundler.
300 # This is a bundle20 object, turn it into an unbundler.
301 # This little dance should be dropped eventually when the
301 # This little dance should be dropped eventually when the
302 # API is finally improved.
302 # API is finally improved.
303 stream = util.chunkbuffer(ret.getchunks())
303 stream = util.chunkbuffer(ret.getchunks())
304 ret = bundle2.getunbundler(self.ui, stream)
304 ret = bundle2.getunbundler(self.ui, stream)
305 return ret
305 return ret
306 except Exception as exc:
306 except Exception as exc:
307 # If the exception contains output salvaged from a bundle2
307 # If the exception contains output salvaged from a bundle2
308 # reply, we need to make sure it is printed before continuing
308 # reply, we need to make sure it is printed before continuing
309 # to fail. So we build a bundle2 with such output and consume
309 # to fail. So we build a bundle2 with such output and consume
310 # it directly.
310 # it directly.
311 #
311 #
312 # This is not very elegant but allows a "simple" solution for
312 # This is not very elegant but allows a "simple" solution for
313 # issue4594
313 # issue4594
314 output = getattr(exc, '_bundle2salvagedoutput', ())
314 output = getattr(exc, '_bundle2salvagedoutput', ())
315 if output:
315 if output:
316 bundler = bundle2.bundle20(self._repo.ui)
316 bundler = bundle2.bundle20(self._repo.ui)
317 for out in output:
317 for out in output:
318 bundler.addpart(out)
318 bundler.addpart(out)
319 stream = util.chunkbuffer(bundler.getchunks())
319 stream = util.chunkbuffer(bundler.getchunks())
320 b = bundle2.getunbundler(self.ui, stream)
320 b = bundle2.getunbundler(self.ui, stream)
321 bundle2.processbundle(self._repo, b)
321 bundle2.processbundle(self._repo, b)
322 raise
322 raise
323 except error.PushRaced as exc:
323 except error.PushRaced as exc:
324 raise error.ResponseError(_('push failed:'),
324 raise error.ResponseError(_('push failed:'),
325 stringutil.forcebytestr(exc))
325 stringutil.forcebytestr(exc))
326
326
327 # End of _basewirecommands interface.
327 # End of _basewirecommands interface.
328
328
329 # Begin of peer interface.
329 # Begin of peer interface.
330
330
331 def commandexecutor(self):
331 def commandexecutor(self):
332 return localcommandexecutor(self)
332 return localcommandexecutor(self)
333
333
334 # End of peer interface.
334 # End of peer interface.
335
335
336 @interfaceutil.implementer(repository.ipeerlegacycommands)
336 @interfaceutil.implementer(repository.ipeerlegacycommands)
337 class locallegacypeer(localpeer):
337 class locallegacypeer(localpeer):
338 '''peer extension which implements legacy methods too; used for tests with
338 '''peer extension which implements legacy methods too; used for tests with
339 restricted capabilities'''
339 restricted capabilities'''
340
340
341 def __init__(self, repo):
341 def __init__(self, repo):
342 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
342 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
343
343
344 # Begin of baselegacywirecommands interface.
344 # Begin of baselegacywirecommands interface.
345
345
346 def between(self, pairs):
346 def between(self, pairs):
347 return self._repo.between(pairs)
347 return self._repo.between(pairs)
348
348
349 def branches(self, nodes):
349 def branches(self, nodes):
350 return self._repo.branches(nodes)
350 return self._repo.branches(nodes)
351
351
352 def changegroup(self, nodes, source):
352 def changegroup(self, nodes, source):
353 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
353 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
354 missingheads=self._repo.heads())
354 missingheads=self._repo.heads())
355 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
355 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
356
356
357 def changegroupsubset(self, bases, heads, source):
357 def changegroupsubset(self, bases, heads, source):
358 outgoing = discovery.outgoing(self._repo, missingroots=bases,
358 outgoing = discovery.outgoing(self._repo, missingroots=bases,
359 missingheads=heads)
359 missingheads=heads)
360 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
360 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
361
361
362 # End of baselegacywirecommands interface.
362 # End of baselegacywirecommands interface.
363
363
364 # Increment the sub-version when the revlog v2 format changes to lock out old
364 # Increment the sub-version when the revlog v2 format changes to lock out old
365 # clients.
365 # clients.
366 REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'
366 REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'
367
367
368 # A repository with the sparserevlog feature will have delta chains that
368 # A repository with the sparserevlog feature will have delta chains that
369 # can spread over a larger span. Sparse reading cuts these large spans into
369 # can spread over a larger span. Sparse reading cuts these large spans into
370 # pieces, so that each piece isn't too big.
370 # pieces, so that each piece isn't too big.
371 # Without the sparserevlog capability, reading from the repository could use
371 # Without the sparserevlog capability, reading from the repository could use
372 # huge amounts of memory, because the whole span would be read at once,
372 # huge amounts of memory, because the whole span would be read at once,
373 # including all the intermediate revisions that aren't pertinent for the chain.
373 # including all the intermediate revisions that aren't pertinent for the chain.
374 # This is why once a repository has enabled sparse-read, it becomes required.
374 # This is why once a repository has enabled sparse-read, it becomes required.
375 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
375 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
376
376
377 # Functions receiving (ui, features) that extensions can register to impact
377 # Functions receiving (ui, features) that extensions can register to impact
378 # the ability to load repositories with custom requirements. Only
378 # the ability to load repositories with custom requirements. Only
379 # functions defined in loaded extensions are called.
379 # functions defined in loaded extensions are called.
380 #
380 #
381 # The function receives a set of requirement strings that the repository
381 # The function receives a set of requirement strings that the repository
382 # is capable of opening. Functions will typically add elements to the
382 # is capable of opening. Functions will typically add elements to the
383 # set to reflect that the extension knows how to handle that requirements.
383 # set to reflect that the extension knows how to handle that requirements.
384 featuresetupfuncs = set()
384 featuresetupfuncs = set()
385
385
386 def makelocalrepository(baseui, path, intents=None):
386 def makelocalrepository(baseui, path, intents=None):
387 """Create a local repository object.
387 """Create a local repository object.
388
388
389 Given arguments needed to construct a local repository, this function
389 Given arguments needed to construct a local repository, this function
390 performs various early repository loading functionality (such as
390 performs various early repository loading functionality (such as
391 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
391 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
392 the repository can be opened, derives a type suitable for representing
392 the repository can be opened, derives a type suitable for representing
393 that repository, and returns an instance of it.
393 that repository, and returns an instance of it.
394
394
395 The returned object conforms to the ``repository.completelocalrepository``
395 The returned object conforms to the ``repository.completelocalrepository``
396 interface.
396 interface.
397
397
398 The repository type is derived by calling a series of factory functions
398 The repository type is derived by calling a series of factory functions
399 for each aspect/interface of the final repository. These are defined by
399 for each aspect/interface of the final repository. These are defined by
400 ``REPO_INTERFACES``.
400 ``REPO_INTERFACES``.
401
401
402 Each factory function is called to produce a type implementing a specific
402 Each factory function is called to produce a type implementing a specific
403 interface. The cumulative list of returned types will be combined into a
403 interface. The cumulative list of returned types will be combined into a
404 new type and that type will be instantiated to represent the local
404 new type and that type will be instantiated to represent the local
405 repository.
405 repository.
406
406
407 The factory functions each receive various state that may be consulted
407 The factory functions each receive various state that may be consulted
408 as part of deriving a type.
408 as part of deriving a type.
409
409
410 Extensions should wrap these factory functions to customize repository type
410 Extensions should wrap these factory functions to customize repository type
411 creation. Note that an extension's wrapped function may be called even if
411 creation. Note that an extension's wrapped function may be called even if
412 that extension is not loaded for the repo being constructed. Extensions
412 that extension is not loaded for the repo being constructed. Extensions
413 should check if their ``__name__`` appears in the
413 should check if their ``__name__`` appears in the
414 ``extensionmodulenames`` set passed to the factory function and no-op if
414 ``extensionmodulenames`` set passed to the factory function and no-op if
415 not.
415 not.
416 """
416 """
417 ui = baseui.copy()
417 ui = baseui.copy()
418 # Prevent copying repo configuration.
418 # Prevent copying repo configuration.
419 ui.copy = baseui.copy
419 ui.copy = baseui.copy
420
420
421 # Working directory VFS rooted at repository root.
421 # Working directory VFS rooted at repository root.
422 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
422 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
423
423
424 # Main VFS for .hg/ directory.
424 # Main VFS for .hg/ directory.
425 hgpath = wdirvfs.join(b'.hg')
425 hgpath = wdirvfs.join(b'.hg')
426 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
426 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
427
427
428 # The .hg/ path should exist and should be a directory. All other
428 # The .hg/ path should exist and should be a directory. All other
429 # cases are errors.
429 # cases are errors.
430 if not hgvfs.isdir():
430 if not hgvfs.isdir():
431 try:
431 try:
432 hgvfs.stat()
432 hgvfs.stat()
433 except OSError as e:
433 except OSError as e:
434 if e.errno != errno.ENOENT:
434 if e.errno != errno.ENOENT:
435 raise
435 raise
436
436
437 raise error.RepoError(_(b'repository %s not found') % path)
437 raise error.RepoError(_(b'repository %s not found') % path)
438
438
439 # .hg/requires file contains a newline-delimited list of
439 # .hg/requires file contains a newline-delimited list of
440 # features/capabilities the opener (us) must have in order to use
440 # features/capabilities the opener (us) must have in order to use
441 # the repository. This file was introduced in Mercurial 0.9.2,
441 # the repository. This file was introduced in Mercurial 0.9.2,
442 # which means very old repositories may not have one. We assume
442 # which means very old repositories may not have one. We assume
443 # a missing file translates to no requirements.
443 # a missing file translates to no requirements.
444 try:
444 try:
445 requirements = set(hgvfs.read(b'requires').splitlines())
445 requirements = set(hgvfs.read(b'requires').splitlines())
446 except IOError as e:
446 except IOError as e:
447 if e.errno != errno.ENOENT:
447 if e.errno != errno.ENOENT:
448 raise
448 raise
449 requirements = set()
449 requirements = set()
450
450
451 # The .hg/hgrc file may load extensions or contain config options
451 # The .hg/hgrc file may load extensions or contain config options
452 # that influence repository construction. Attempt to load it and
452 # that influence repository construction. Attempt to load it and
453 # process any new extensions that it may have pulled in.
453 # process any new extensions that it may have pulled in.
454 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
454 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
455 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
455 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
456 extensions.loadall(ui)
456 extensions.loadall(ui)
457 extensions.populateui(ui)
457 extensions.populateui(ui)
458
458
459 # Set of module names of extensions loaded for this repository.
459 # Set of module names of extensions loaded for this repository.
460 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
460 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
461
461
462 supportedrequirements = gathersupportedrequirements(ui)
462 supportedrequirements = gathersupportedrequirements(ui)
463
463
464 # We first validate the requirements are known.
464 # We first validate the requirements are known.
465 ensurerequirementsrecognized(requirements, supportedrequirements)
465 ensurerequirementsrecognized(requirements, supportedrequirements)
466
466
467 # Then we validate that the known set is reasonable to use together.
467 # Then we validate that the known set is reasonable to use together.
468 ensurerequirementscompatible(ui, requirements)
468 ensurerequirementscompatible(ui, requirements)
469
469
470 # TODO there are unhandled edge cases related to opening repositories with
470 # TODO there are unhandled edge cases related to opening repositories with
471 # shared storage. If storage is shared, we should also test for requirements
471 # shared storage. If storage is shared, we should also test for requirements
472 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
472 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
473 # that repo, as that repo may load extensions needed to open it. This is a
473 # that repo, as that repo may load extensions needed to open it. This is a
474 # bit complicated because we don't want the other hgrc to overwrite settings
474 # bit complicated because we don't want the other hgrc to overwrite settings
475 # in this hgrc.
475 # in this hgrc.
476 #
476 #
477 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
477 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
478 # file when sharing repos. But if a requirement is added after the share is
478 # file when sharing repos. But if a requirement is added after the share is
479 # performed, thereby introducing a new requirement for the opener, we may
479 # performed, thereby introducing a new requirement for the opener, we may
480 # will not see that and could encounter a run-time error interacting with
480 # will not see that and could encounter a run-time error interacting with
481 # that shared store since it has an unknown-to-us requirement.
481 # that shared store since it has an unknown-to-us requirement.
482
482
483 # At this point, we know we should be capable of opening the repository.
483 # At this point, we know we should be capable of opening the repository.
484 # Now get on with doing that.
484 # Now get on with doing that.
485
485
486 features = set()
486 features = set()
487
487
488 # The "store" part of the repository holds versioned data. How it is
488 # The "store" part of the repository holds versioned data. How it is
489 # accessed is determined by various requirements. The ``shared`` or
489 # accessed is determined by various requirements. The ``shared`` or
490 # ``relshared`` requirements indicate the store lives in the path contained
490 # ``relshared`` requirements indicate the store lives in the path contained
491 # in the ``.hg/sharedpath`` file. This is an absolute path for
491 # in the ``.hg/sharedpath`` file. This is an absolute path for
492 # ``shared`` and relative to ``.hg/`` for ``relshared``.
492 # ``shared`` and relative to ``.hg/`` for ``relshared``.
493 if b'shared' in requirements or b'relshared' in requirements:
493 if b'shared' in requirements or b'relshared' in requirements:
494 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
494 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
495 if b'relshared' in requirements:
495 if b'relshared' in requirements:
496 sharedpath = hgvfs.join(sharedpath)
496 sharedpath = hgvfs.join(sharedpath)
497
497
498 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
498 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
499
499
500 if not sharedvfs.exists():
500 if not sharedvfs.exists():
501 raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
501 raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
502 b'directory %s') % sharedvfs.base)
502 b'directory %s') % sharedvfs.base)
503
503
504 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
504 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
505
505
506 storebasepath = sharedvfs.base
506 storebasepath = sharedvfs.base
507 cachepath = sharedvfs.join(b'cache')
507 cachepath = sharedvfs.join(b'cache')
508 else:
508 else:
509 storebasepath = hgvfs.base
509 storebasepath = hgvfs.base
510 cachepath = hgvfs.join(b'cache')
510 cachepath = hgvfs.join(b'cache')
511 wcachepath = hgvfs.join(b'wcache')
511 wcachepath = hgvfs.join(b'wcache')
512
512
513
513
514 # The store has changed over time and the exact layout is dictated by
514 # The store has changed over time and the exact layout is dictated by
515 # requirements. The store interface abstracts differences across all
515 # requirements. The store interface abstracts differences across all
516 # of them.
516 # of them.
517 store = makestore(requirements, storebasepath,
517 store = makestore(requirements, storebasepath,
518 lambda base: vfsmod.vfs(base, cacheaudited=True))
518 lambda base: vfsmod.vfs(base, cacheaudited=True))
519 hgvfs.createmode = store.createmode
519 hgvfs.createmode = store.createmode
520
520
521 storevfs = store.vfs
521 storevfs = store.vfs
522 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
522 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
523
523
524 # The cache vfs is used to manage cache files.
524 # The cache vfs is used to manage cache files.
525 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
525 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
526 cachevfs.createmode = store.createmode
526 cachevfs.createmode = store.createmode
527 # The cache vfs is used to manage cache files related to the working copy
527 # The cache vfs is used to manage cache files related to the working copy
528 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
528 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
529 wcachevfs.createmode = store.createmode
529 wcachevfs.createmode = store.createmode
530
530
531 # Now resolve the type for the repository object. We do this by repeatedly
531 # Now resolve the type for the repository object. We do this by repeatedly
532 # calling a factory function to produces types for specific aspects of the
532 # calling a factory function to produces types for specific aspects of the
533 # repo's operation. The aggregate returned types are used as base classes
533 # repo's operation. The aggregate returned types are used as base classes
534 # for a dynamically-derived type, which will represent our new repository.
534 # for a dynamically-derived type, which will represent our new repository.
535
535
536 bases = []
536 bases = []
537 extrastate = {}
537 extrastate = {}
538
538
539 for iface, fn in REPO_INTERFACES:
539 for iface, fn in REPO_INTERFACES:
540 # We pass all potentially useful state to give extensions tons of
540 # We pass all potentially useful state to give extensions tons of
541 # flexibility.
541 # flexibility.
542 typ = fn()(ui=ui,
542 typ = fn()(ui=ui,
543 intents=intents,
543 intents=intents,
544 requirements=requirements,
544 requirements=requirements,
545 features=features,
545 features=features,
546 wdirvfs=wdirvfs,
546 wdirvfs=wdirvfs,
547 hgvfs=hgvfs,
547 hgvfs=hgvfs,
548 store=store,
548 store=store,
549 storevfs=storevfs,
549 storevfs=storevfs,
550 storeoptions=storevfs.options,
550 storeoptions=storevfs.options,
551 cachevfs=cachevfs,
551 cachevfs=cachevfs,
552 wcachevfs=wcachevfs,
552 wcachevfs=wcachevfs,
553 extensionmodulenames=extensionmodulenames,
553 extensionmodulenames=extensionmodulenames,
554 extrastate=extrastate,
554 extrastate=extrastate,
555 baseclasses=bases)
555 baseclasses=bases)
556
556
557 if not isinstance(typ, type):
557 if not isinstance(typ, type):
558 raise error.ProgrammingError('unable to construct type for %s' %
558 raise error.ProgrammingError('unable to construct type for %s' %
559 iface)
559 iface)
560
560
561 bases.append(typ)
561 bases.append(typ)
562
562
563 # type() allows you to use characters in type names that wouldn't be
563 # type() allows you to use characters in type names that wouldn't be
564 # recognized as Python symbols in source code. We abuse that to add
564 # recognized as Python symbols in source code. We abuse that to add
565 # rich information about our constructed repo.
565 # rich information about our constructed repo.
566 name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
566 name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
567 wdirvfs.base,
567 wdirvfs.base,
568 b','.join(sorted(requirements))))
568 b','.join(sorted(requirements))))
569
569
570 cls = type(name, tuple(bases), {})
570 cls = type(name, tuple(bases), {})
571
571
572 return cls(
572 return cls(
573 baseui=baseui,
573 baseui=baseui,
574 ui=ui,
574 ui=ui,
575 origroot=path,
575 origroot=path,
576 wdirvfs=wdirvfs,
576 wdirvfs=wdirvfs,
577 hgvfs=hgvfs,
577 hgvfs=hgvfs,
578 requirements=requirements,
578 requirements=requirements,
579 supportedrequirements=supportedrequirements,
579 supportedrequirements=supportedrequirements,
580 sharedpath=storebasepath,
580 sharedpath=storebasepath,
581 store=store,
581 store=store,
582 cachevfs=cachevfs,
582 cachevfs=cachevfs,
583 wcachevfs=wcachevfs,
583 wcachevfs=wcachevfs,
584 features=features,
584 features=features,
585 intents=intents)
585 intents=intents)
586
586
587 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
587 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
588 """Load hgrc files/content into a ui instance.
588 """Load hgrc files/content into a ui instance.
589
589
590 This is called during repository opening to load any additional
590 This is called during repository opening to load any additional
591 config files or settings relevant to the current repository.
591 config files or settings relevant to the current repository.
592
592
593 Returns a bool indicating whether any additional configs were loaded.
593 Returns a bool indicating whether any additional configs were loaded.
594
594
595 Extensions should monkeypatch this function to modify how per-repo
595 Extensions should monkeypatch this function to modify how per-repo
596 configs are loaded. For example, an extension may wish to pull in
596 configs are loaded. For example, an extension may wish to pull in
597 configs from alternate files or sources.
597 configs from alternate files or sources.
598 """
598 """
599 try:
599 try:
600 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
600 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
601 return True
601 return True
602 except IOError:
602 except IOError:
603 return False
603 return False
604
604
605 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
605 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
606 """Perform additional actions after .hg/hgrc is loaded.
606 """Perform additional actions after .hg/hgrc is loaded.
607
607
608 This function is called during repository loading immediately after
608 This function is called during repository loading immediately after
609 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
609 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
610
610
611 The function can be used to validate configs, automatically add
611 The function can be used to validate configs, automatically add
612 options (including extensions) based on requirements, etc.
612 options (including extensions) based on requirements, etc.
613 """
613 """
614
614
615 # Map of requirements to list of extensions to load automatically when
615 # Map of requirements to list of extensions to load automatically when
616 # requirement is present.
616 # requirement is present.
617 autoextensions = {
617 autoextensions = {
618 b'largefiles': [b'largefiles'],
618 b'largefiles': [b'largefiles'],
619 b'lfs': [b'lfs'],
619 b'lfs': [b'lfs'],
620 }
620 }
621
621
622 for requirement, names in sorted(autoextensions.items()):
622 for requirement, names in sorted(autoextensions.items()):
623 if requirement not in requirements:
623 if requirement not in requirements:
624 continue
624 continue
625
625
626 for name in names:
626 for name in names:
627 if not ui.hasconfig(b'extensions', name):
627 if not ui.hasconfig(b'extensions', name):
628 ui.setconfig(b'extensions', name, b'', source='autoload')
628 ui.setconfig(b'extensions', name, b'', source='autoload')
629
629
630 def gathersupportedrequirements(ui):
630 def gathersupportedrequirements(ui):
631 """Determine the complete set of recognized requirements."""
631 """Determine the complete set of recognized requirements."""
632 # Start with all requirements supported by this file.
632 # Start with all requirements supported by this file.
633 supported = set(localrepository._basesupported)
633 supported = set(localrepository._basesupported)
634
634
635 # Execute ``featuresetupfuncs`` entries if they belong to an extension
635 # Execute ``featuresetupfuncs`` entries if they belong to an extension
636 # relevant to this ui instance.
636 # relevant to this ui instance.
637 modules = {m.__name__ for n, m in extensions.extensions(ui)}
637 modules = {m.__name__ for n, m in extensions.extensions(ui)}
638
638
639 for fn in featuresetupfuncs:
639 for fn in featuresetupfuncs:
640 if fn.__module__ in modules:
640 if fn.__module__ in modules:
641 fn(ui, supported)
641 fn(ui, supported)
642
642
643 # Add derived requirements from registered compression engines.
643 # Add derived requirements from registered compression engines.
644 for name in util.compengines:
644 for name in util.compengines:
645 engine = util.compengines[name]
645 engine = util.compengines[name]
646 if engine.revlogheader():
646 if engine.revlogheader():
647 supported.add(b'exp-compression-%s' % name)
647 supported.add(b'exp-compression-%s' % name)
648
648
649 return supported
649 return supported
650
650
651 def ensurerequirementsrecognized(requirements, supported):
651 def ensurerequirementsrecognized(requirements, supported):
652 """Validate that a set of local requirements is recognized.
652 """Validate that a set of local requirements is recognized.
653
653
654 Receives a set of requirements. Raises an ``error.RepoError`` if there
654 Receives a set of requirements. Raises an ``error.RepoError`` if there
655 exists any requirement in that set that currently loaded code doesn't
655 exists any requirement in that set that currently loaded code doesn't
656 recognize.
656 recognize.
657
657
658 Returns a set of supported requirements.
658 Returns a set of supported requirements.
659 """
659 """
660 missing = set()
660 missing = set()
661
661
662 for requirement in requirements:
662 for requirement in requirements:
663 if requirement in supported:
663 if requirement in supported:
664 continue
664 continue
665
665
666 if not requirement or not requirement[0:1].isalnum():
666 if not requirement or not requirement[0:1].isalnum():
667 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
667 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
668
668
669 missing.add(requirement)
669 missing.add(requirement)
670
670
671 if missing:
671 if missing:
672 raise error.RequirementError(
672 raise error.RequirementError(
673 _(b'repository requires features unknown to this Mercurial: %s') %
673 _(b'repository requires features unknown to this Mercurial: %s') %
674 b' '.join(sorted(missing)),
674 b' '.join(sorted(missing)),
675 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
675 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
676 b'for more information'))
676 b'for more information'))
677
677
678 def ensurerequirementscompatible(ui, requirements):
678 def ensurerequirementscompatible(ui, requirements):
679 """Validates that a set of recognized requirements is mutually compatible.
679 """Validates that a set of recognized requirements is mutually compatible.
680
680
681 Some requirements may not be compatible with others or require
681 Some requirements may not be compatible with others or require
682 config options that aren't enabled. This function is called during
682 config options that aren't enabled. This function is called during
683 repository opening to ensure that the set of requirements needed
683 repository opening to ensure that the set of requirements needed
684 to open a repository is sane and compatible with config options.
684 to open a repository is sane and compatible with config options.
685
685
686 Extensions can monkeypatch this function to perform additional
686 Extensions can monkeypatch this function to perform additional
687 checking.
687 checking.
688
688
689 ``error.RepoError`` should be raised on failure.
689 ``error.RepoError`` should be raised on failure.
690 """
690 """
691 if b'exp-sparse' in requirements and not sparse.enabled:
691 if b'exp-sparse' in requirements and not sparse.enabled:
692 raise error.RepoError(_(b'repository is using sparse feature but '
692 raise error.RepoError(_(b'repository is using sparse feature but '
693 b'sparse is not enabled; enable the '
693 b'sparse is not enabled; enable the '
694 b'"sparse" extensions to access'))
694 b'"sparse" extensions to access'))
695
695
696 def makestore(requirements, path, vfstype):
696 def makestore(requirements, path, vfstype):
697 """Construct a storage object for a repository."""
697 """Construct a storage object for a repository."""
698 if b'store' in requirements:
698 if b'store' in requirements:
699 if b'fncache' in requirements:
699 if b'fncache' in requirements:
700 return storemod.fncachestore(path, vfstype,
700 return storemod.fncachestore(path, vfstype,
701 b'dotencode' in requirements)
701 b'dotencode' in requirements)
702
702
703 return storemod.encodedstore(path, vfstype)
703 return storemod.encodedstore(path, vfstype)
704
704
705 return storemod.basicstore(path, vfstype)
705 return storemod.basicstore(path, vfstype)
706
706
707 def resolvestorevfsoptions(ui, requirements, features):
707 def resolvestorevfsoptions(ui, requirements, features):
708 """Resolve the options to pass to the store vfs opener.
708 """Resolve the options to pass to the store vfs opener.
709
709
710 The returned dict is used to influence behavior of the storage layer.
710 The returned dict is used to influence behavior of the storage layer.
711 """
711 """
712 options = {}
712 options = {}
713
713
714 if b'treemanifest' in requirements:
714 if b'treemanifest' in requirements:
715 options[b'treemanifest'] = True
715 options[b'treemanifest'] = True
716
716
717 # experimental config: format.manifestcachesize
717 # experimental config: format.manifestcachesize
718 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
718 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
719 if manifestcachesize is not None:
719 if manifestcachesize is not None:
720 options[b'manifestcachesize'] = manifestcachesize
720 options[b'manifestcachesize'] = manifestcachesize
721
721
722 # In the absence of another requirement superseding a revlog-related
722 # In the absence of another requirement superseding a revlog-related
723 # requirement, we have to assume the repo is using revlog version 0.
723 # requirement, we have to assume the repo is using revlog version 0.
724 # This revlog format is super old and we don't bother trying to parse
724 # This revlog format is super old and we don't bother trying to parse
725 # opener options for it because those options wouldn't do anything
725 # opener options for it because those options wouldn't do anything
726 # meaningful on such old repos.
726 # meaningful on such old repos.
727 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
727 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
728 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
728 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
729
729
730 return options
730 return options
731
731
732 def resolverevlogstorevfsoptions(ui, requirements, features):
732 def resolverevlogstorevfsoptions(ui, requirements, features):
733 """Resolve opener options specific to revlogs."""
733 """Resolve opener options specific to revlogs."""
734
734
735 options = {}
735 options = {}
736 options[b'flagprocessors'] = {}
736 options[b'flagprocessors'] = {}
737
737
738 if b'revlogv1' in requirements:
738 if b'revlogv1' in requirements:
739 options[b'revlogv1'] = True
739 options[b'revlogv1'] = True
740 if REVLOGV2_REQUIREMENT in requirements:
740 if REVLOGV2_REQUIREMENT in requirements:
741 options[b'revlogv2'] = True
741 options[b'revlogv2'] = True
742
742
743 if b'generaldelta' in requirements:
743 if b'generaldelta' in requirements:
744 options[b'generaldelta'] = True
744 options[b'generaldelta'] = True
745
745
746 # experimental config: format.chunkcachesize
746 # experimental config: format.chunkcachesize
747 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
747 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
748 if chunkcachesize is not None:
748 if chunkcachesize is not None:
749 options[b'chunkcachesize'] = chunkcachesize
749 options[b'chunkcachesize'] = chunkcachesize
750
750
751 deltabothparents = ui.configbool(b'storage',
751 deltabothparents = ui.configbool(b'storage',
752 b'revlog.optimize-delta-parent-choice')
752 b'revlog.optimize-delta-parent-choice')
753 options[b'deltabothparents'] = deltabothparents
753 options[b'deltabothparents'] = deltabothparents
754
754
755 lazydeltabase = ui.configbool(b'storage',
755 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
756 b'revlog.reuse-external-delta-parent')
756 lazydeltabase = False
757 if lazydelta:
758 lazydeltabase = ui.configbool(b'storage',
759 b'revlog.reuse-external-delta-parent')
757 if lazydeltabase is None:
760 if lazydeltabase is None:
758 lazydeltabase = not scmutil.gddeltaconfig(ui)
761 lazydeltabase = not scmutil.gddeltaconfig(ui)
762 options[b'lazydelta'] = lazydelta
759 options[b'lazydeltabase'] = lazydeltabase
763 options[b'lazydeltabase'] = lazydeltabase
760
764
761 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
765 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
762 if 0 <= chainspan:
766 if 0 <= chainspan:
763 options[b'maxdeltachainspan'] = chainspan
767 options[b'maxdeltachainspan'] = chainspan
764
768
765 mmapindexthreshold = ui.configbytes(b'experimental',
769 mmapindexthreshold = ui.configbytes(b'experimental',
766 b'mmapindexthreshold')
770 b'mmapindexthreshold')
767 if mmapindexthreshold is not None:
771 if mmapindexthreshold is not None:
768 options[b'mmapindexthreshold'] = mmapindexthreshold
772 options[b'mmapindexthreshold'] = mmapindexthreshold
769
773
770 withsparseread = ui.configbool(b'experimental', b'sparse-read')
774 withsparseread = ui.configbool(b'experimental', b'sparse-read')
771 srdensitythres = float(ui.config(b'experimental',
775 srdensitythres = float(ui.config(b'experimental',
772 b'sparse-read.density-threshold'))
776 b'sparse-read.density-threshold'))
773 srmingapsize = ui.configbytes(b'experimental',
777 srmingapsize = ui.configbytes(b'experimental',
774 b'sparse-read.min-gap-size')
778 b'sparse-read.min-gap-size')
775 options[b'with-sparse-read'] = withsparseread
779 options[b'with-sparse-read'] = withsparseread
776 options[b'sparse-read-density-threshold'] = srdensitythres
780 options[b'sparse-read-density-threshold'] = srdensitythres
777 options[b'sparse-read-min-gap-size'] = srmingapsize
781 options[b'sparse-read-min-gap-size'] = srmingapsize
778
782
779 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
783 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
780 options[b'sparse-revlog'] = sparserevlog
784 options[b'sparse-revlog'] = sparserevlog
781 if sparserevlog:
785 if sparserevlog:
782 options[b'generaldelta'] = True
786 options[b'generaldelta'] = True
783
787
784 maxchainlen = None
788 maxchainlen = None
785 if sparserevlog:
789 if sparserevlog:
786 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
790 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
787 # experimental config: format.maxchainlen
791 # experimental config: format.maxchainlen
788 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
792 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
789 if maxchainlen is not None:
793 if maxchainlen is not None:
790 options[b'maxchainlen'] = maxchainlen
794 options[b'maxchainlen'] = maxchainlen
791
795
792 for r in requirements:
796 for r in requirements:
793 if r.startswith(b'exp-compression-'):
797 if r.startswith(b'exp-compression-'):
794 options[b'compengine'] = r[len(b'exp-compression-'):]
798 options[b'compengine'] = r[len(b'exp-compression-'):]
795
799
796 if repository.NARROW_REQUIREMENT in requirements:
800 if repository.NARROW_REQUIREMENT in requirements:
797 options[b'enableellipsis'] = True
801 options[b'enableellipsis'] = True
798
802
799 return options
803 return options
800
804
801 def makemain(**kwargs):
805 def makemain(**kwargs):
802 """Produce a type conforming to ``ilocalrepositorymain``."""
806 """Produce a type conforming to ``ilocalrepositorymain``."""
803 return localrepository
807 return localrepository
804
808
805 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
809 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
806 class revlogfilestorage(object):
810 class revlogfilestorage(object):
807 """File storage when using revlogs."""
811 """File storage when using revlogs."""
808
812
809 def file(self, path):
813 def file(self, path):
810 if path[0] == b'/':
814 if path[0] == b'/':
811 path = path[1:]
815 path = path[1:]
812
816
813 return filelog.filelog(self.svfs, path)
817 return filelog.filelog(self.svfs, path)
814
818
815 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
819 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
816 class revlognarrowfilestorage(object):
820 class revlognarrowfilestorage(object):
817 """File storage when using revlogs and narrow files."""
821 """File storage when using revlogs and narrow files."""
818
822
819 def file(self, path):
823 def file(self, path):
820 if path[0] == b'/':
824 if path[0] == b'/':
821 path = path[1:]
825 path = path[1:]
822
826
823 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
827 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
824
828
825 def makefilestorage(requirements, features, **kwargs):
829 def makefilestorage(requirements, features, **kwargs):
826 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
830 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
827 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
831 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
828 features.add(repository.REPO_FEATURE_STREAM_CLONE)
832 features.add(repository.REPO_FEATURE_STREAM_CLONE)
829
833
830 if repository.NARROW_REQUIREMENT in requirements:
834 if repository.NARROW_REQUIREMENT in requirements:
831 return revlognarrowfilestorage
835 return revlognarrowfilestorage
832 else:
836 else:
833 return revlogfilestorage
837 return revlogfilestorage
834
838
835 # List of repository interfaces and factory functions for them. Each
839 # List of repository interfaces and factory functions for them. Each
836 # will be called in order during ``makelocalrepository()`` to iteratively
840 # will be called in order during ``makelocalrepository()`` to iteratively
837 # derive the final type for a local repository instance. We capture the
841 # derive the final type for a local repository instance. We capture the
838 # function as a lambda so we don't hold a reference and the module-level
842 # function as a lambda so we don't hold a reference and the module-level
839 # functions can be wrapped.
843 # functions can be wrapped.
840 REPO_INTERFACES = [
844 REPO_INTERFACES = [
841 (repository.ilocalrepositorymain, lambda: makemain),
845 (repository.ilocalrepositorymain, lambda: makemain),
842 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
846 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
843 ]
847 ]
844
848
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.

    # Requirements that affect the on-disk storage format (as opposed to
    # working-copy features); a repo with an unknown one of these cannot
    # be opened.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    # Full set of requirements this class knows how to open, including
    # non-storage-format ones.
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }
902
906
    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        # Name of the repoview filter in effect (None == unfiltered).
        self.filtername = None

        # Wrap the vfs audit hook so unlocked writes trigger devel warnings.
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []

        color.setup(self.ui)

        # Convenience aliases into the store.
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        # Only warn once about an unknown dirstate parent (see
        # _dirstatevalidate).
        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}
1034
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs

        Wraps the vfs audit callback so writes under ``.hg/`` made without
        holding the appropriate lock emit a devel warning (when the
        ``devel.check-locks`` knob enabled the wrapping in ``__init__``).
        """
        # weakref so the closure does not keep the repo alive
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                # read access never requires a lock
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
            if path.startswith('journal.') or path.startswith('undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=3, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=3, config='check-locks')
            return ret
        return checkvfs
1065
1069
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs

        Same idea as ``_getvfsward`` but for the store vfs: any write under
        the store without the repo 'lock' held triggers a devel warning.
        """
        # weakref so the closure does not keep the repo alive
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                # read access never requires a lock
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=4)
            return ret
        return checksvfs
1084
1088
1085 def close(self):
1089 def close(self):
1086 self._writecaches()
1090 self._writecaches()
1087
1091
1088 def _writecaches(self):
1092 def _writecaches(self):
1089 if self._revbranchcache:
1093 if self._revbranchcache:
1090 self._revbranchcache.write()
1094 self._revbranchcache.write()
1091
1095
1092 def _restrictcapabilities(self, caps):
1096 def _restrictcapabilities(self, caps):
1093 if self.ui.configbool('experimental', 'bundle2-advertise'):
1097 if self.ui.configbool('experimental', 'bundle2-advertise'):
1094 caps = set(caps)
1098 caps = set(caps)
1095 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1099 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1096 role='client'))
1100 role='client'))
1097 caps.add('bundle2=' + urlreq.quote(capsblob))
1101 caps.add('bundle2=' + urlreq.quote(capsblob))
1098 return caps
1102 return caps
1099
1103
1100 def _writerequirements(self):
1104 def _writerequirements(self):
1101 scmutil.writerequires(self.vfs, self.requirements)
1105 scmutil.writerequires(self.vfs, self.requirements)
1102
1106
    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)
1118
1122
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        Walks the path components from the longest prefix down, checking
        each against the working copy's ``.hgsub`` state; delegates to the
        subrepo's own ``checknested`` when the prefix is a registered
        subrepo. Returns False for paths outside the repo root.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
1156
1160
1157 def peer(self):
1161 def peer(self):
1158 return localpeer(self) # not cached to avoid reference cycle
1162 return localpeer(self) # not cached to avoid reference cycle
1159
1163
1160 def unfiltered(self):
1164 def unfiltered(self):
1161 """Return unfiltered version of the repository
1165 """Return unfiltered version of the repository
1162
1166
1163 Intended to be overwritten by filtered repo."""
1167 Intended to be overwritten by filtered repo."""
1164 return self
1168 return self
1165
1169
1166 def filtered(self, name, visibilityexceptions=None):
1170 def filtered(self, name, visibilityexceptions=None):
1167 """Return a filtered version of a repository"""
1171 """Return a filtered version of a repository"""
1168 cls = repoview.newtype(self.unfiltered().__class__)
1172 cls = repoview.newtype(self.unfiltered().__class__)
1169 return cls(self, name, visibilityexceptions)
1173 return cls(self, name, visibilityexceptions)
1170
1174
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        # bookmark store, invalidated when the backing files change
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        # name of the currently active bookmark, or None
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # obsolescence marker store (may be a no-op store when disabled)
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        # honor transaction-pending data when a transaction is in flight
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        # root manifest revlog wrapped in the manifestlog abstraction,
        # restricted by the store-level narrow matcher
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore,
                                    self._storenarrowmatch)
1200
1204
    @repofilecache('dirstate')
    def dirstate(self):
        # cached dirstate, rebuilt when .hg/dirstate changes on disk
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        # lazily resolve the sparse matcher so config changes are honored
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        # Validate that a dirstate parent exists in the changelog; fall
        # back to nullid (warning once) when the dirstate is damaged.
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid
1222
1226
    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        # store-level narrow matcher; always() when the repo isn't narrow
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        # working-copy-level narrow matcher; also validates that the
        # working copy agrees with the narrowspec
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)
1245
1249
    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        """Persist a new narrowspec and drop caches derived from the old one."""
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)
1268
1272
    def __getitem__(self, changeid):
        """Return the context for ``changeid``.

        ``changeid`` may be None (working context), an existing context, a
        slice of revision numbers, an int revision, the symbolic names
        'null'/'tip'/'.', a 20-byte binary node, or a 40-byte hex node.
        Raises error.RepoLookupError (or a Filtered* subclass) on failure.
        """
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _("unknown revision '%s'") % pycompat.bytestr(changeid))
        except error.WdirUnsupported:
            return context.workingctx(self)
1331
1335
1332 def __contains__(self, changeid):
1336 def __contains__(self, changeid):
1333 """True if the given changeid exists
1337 """True if the given changeid exists
1334
1338
1335 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1339 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1336 specified.
1340 specified.
1337 """
1341 """
1338 try:
1342 try:
1339 self[changeid]
1343 self[changeid]
1340 return True
1344 return True
1341 except error.RepoLookupError:
1345 except error.RepoLookupError:
1342 return False
1346 return False
1343
1347
1344 def __nonzero__(self):
1348 def __nonzero__(self):
1345 return True
1349 return True
1346
1350
1347 __bool__ = __nonzero__
1351 __bool__ = __nonzero__
1348
1352
1349 def __len__(self):
1353 def __len__(self):
1350 # no need to pay the cost of repoview.changelog
1354 # no need to pay the cost of repoview.changelog
1351 unfi = self.unfiltered()
1355 unfi = self.unfiltered()
1352 return len(unfi.changelog)
1356 return len(unfi.changelog)
1353
1357
1354 def __iter__(self):
1358 def __iter__(self):
1355 return iter(self.changelog)
1359 return iter(self.changelog)
1356
1360
1357 def revs(self, expr, *args):
1361 def revs(self, expr, *args):
1358 '''Find revisions matching a revset.
1362 '''Find revisions matching a revset.
1359
1363
1360 The revset is specified as a string ``expr`` that may contain
1364 The revset is specified as a string ``expr`` that may contain
1361 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1365 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1362
1366
1363 Revset aliases from the configuration are not expanded. To expand
1367 Revset aliases from the configuration are not expanded. To expand
1364 user aliases, consider calling ``scmutil.revrange()`` or
1368 user aliases, consider calling ``scmutil.revrange()`` or
1365 ``repo.anyrevs([expr], user=True)``.
1369 ``repo.anyrevs([expr], user=True)``.
1366
1370
1367 Returns a revset.abstractsmartset, which is a list-like interface
1371 Returns a revset.abstractsmartset, which is a list-like interface
1368 that contains integer revisions.
1372 that contains integer revisions.
1369 '''
1373 '''
1370 tree = revsetlang.spectree(expr, *args)
1374 tree = revsetlang.spectree(expr, *args)
1371 return revset.makematcher(tree)(self)
1375 return revset.makematcher(tree)(self)
1372
1376
1373 def set(self, expr, *args):
1377 def set(self, expr, *args):
1374 '''Find revisions matching a revset and emit changectx instances.
1378 '''Find revisions matching a revset and emit changectx instances.
1375
1379
1376 This is a convenience wrapper around ``revs()`` that iterates the
1380 This is a convenience wrapper around ``revs()`` that iterates the
1377 result and is a generator of changectx instances.
1381 result and is a generator of changectx instances.
1378
1382
1379 Revset aliases from the configuration are not expanded. To expand
1383 Revset aliases from the configuration are not expanded. To expand
1380 user aliases, consider calling ``scmutil.revrange()``.
1384 user aliases, consider calling ``scmutil.revrange()``.
1381 '''
1385 '''
1382 for r in self.revs(expr, *args):
1386 for r in self.revs(expr, *args):
1383 yield self[r]
1387 yield self[r]
1384
1388
1385 def anyrevs(self, specs, user=False, localalias=None):
1389 def anyrevs(self, specs, user=False, localalias=None):
1386 '''Find revisions matching one of the given revsets.
1390 '''Find revisions matching one of the given revsets.
1387
1391
1388 Revset aliases from the configuration are not expanded by default. To
1392 Revset aliases from the configuration are not expanded by default. To
1389 expand user aliases, specify ``user=True``. To provide some local
1393 expand user aliases, specify ``user=True``. To provide some local
1390 definitions overriding user aliases, set ``localalias`` to
1394 definitions overriding user aliases, set ``localalias`` to
1391 ``{name: definitionstring}``.
1395 ``{name: definitionstring}``.
1392 '''
1396 '''
1393 if user:
1397 if user:
1394 m = revset.matchany(self.ui, specs,
1398 m = revset.matchany(self.ui, specs,
1395 lookup=revset.lookupfn(self),
1399 lookup=revset.lookupfn(self),
1396 localalias=localalias)
1400 localalias=localalias)
1397 else:
1401 else:
1398 m = revset.matchany(None, specs, localalias=localalias)
1402 m = revset.matchany(None, specs, localalias=localalias)
1399 return m(self)
1403 return m(self)
1400
1404
1401 def url(self):
1405 def url(self):
1402 return 'file:' + self.root
1406 return 'file:' + self.root
1403
1407
1404 def hook(self, name, throw=False, **args):
1408 def hook(self, name, throw=False, **args):
1405 """Call a hook, passing this repo instance.
1409 """Call a hook, passing this repo instance.
1406
1410
1407 This a convenience method to aid invoking hooks. Extensions likely
1411 This a convenience method to aid invoking hooks. Extensions likely
1408 won't call this unless they have registered a custom hook or are
1412 won't call this unless they have registered a custom hook or are
1409 replacing code that is expected to call a hook.
1413 replacing code that is expected to call a hook.
1410 """
1414 """
1411 return hook.hook(self.ui, self, name, throw, **args)
1415 return hook.hook(self.ui, self, name, throw, **args)
1412
1416
1413 @filteredpropertycache
1417 @filteredpropertycache
1414 def _tagscache(self):
1418 def _tagscache(self):
1415 '''Returns a tagscache object that contains various tags related
1419 '''Returns a tagscache object that contains various tags related
1416 caches.'''
1420 caches.'''
1417
1421
1418 # This simplifies its cache management by having one decorated
1422 # This simplifies its cache management by having one decorated
1419 # function (this one) and the rest simply fetch things from it.
1423 # function (this one) and the rest simply fetch things from it.
1420 class tagscache(object):
1424 class tagscache(object):
1421 def __init__(self):
1425 def __init__(self):
1422 # These two define the set of tags for this repository. tags
1426 # These two define the set of tags for this repository. tags
1423 # maps tag name to node; tagtypes maps tag name to 'global' or
1427 # maps tag name to node; tagtypes maps tag name to 'global' or
1424 # 'local'. (Global tags are defined by .hgtags across all
1428 # 'local'. (Global tags are defined by .hgtags across all
1425 # heads, and local tags are defined in .hg/localtags.)
1429 # heads, and local tags are defined in .hg/localtags.)
1426 # They constitute the in-memory cache of tags.
1430 # They constitute the in-memory cache of tags.
1427 self.tags = self.tagtypes = None
1431 self.tags = self.tagtypes = None
1428
1432
1429 self.nodetagscache = self.tagslist = None
1433 self.nodetagscache = self.tagslist = None
1430
1434
1431 cache = tagscache()
1435 cache = tagscache()
1432 cache.tags, cache.tagtypes = self._findtags()
1436 cache.tags, cache.tagtypes = self._findtags()
1433
1437
1434 return cache
1438 return cache
1435
1439
1436 def tags(self):
1440 def tags(self):
1437 '''return a mapping of tag to node'''
1441 '''return a mapping of tag to node'''
1438 t = {}
1442 t = {}
1439 if self.changelog.filteredrevs:
1443 if self.changelog.filteredrevs:
1440 tags, tt = self._findtags()
1444 tags, tt = self._findtags()
1441 else:
1445 else:
1442 tags = self._tagscache.tags
1446 tags = self._tagscache.tags
1443 rev = self.changelog.rev
1447 rev = self.changelog.rev
1444 for k, v in tags.iteritems():
1448 for k, v in tags.iteritems():
1445 try:
1449 try:
1446 # ignore tags to unknown nodes
1450 # ignore tags to unknown nodes
1447 rev(v)
1451 rev(v)
1448 t[k] = v
1452 t[k] = v
1449 except (error.LookupError, ValueError):
1453 except (error.LookupError, ValueError):
1450 pass
1454 pass
1451 return t
1455 return t
1452
1456
1453 def _findtags(self):
1457 def _findtags(self):
1454 '''Do the hard work of finding tags. Return a pair of dicts
1458 '''Do the hard work of finding tags. Return a pair of dicts
1455 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1459 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1456 maps tag name to a string like \'global\' or \'local\'.
1460 maps tag name to a string like \'global\' or \'local\'.
1457 Subclasses or extensions are free to add their own tags, but
1461 Subclasses or extensions are free to add their own tags, but
1458 should be aware that the returned dicts will be retained for the
1462 should be aware that the returned dicts will be retained for the
1459 duration of the localrepo object.'''
1463 duration of the localrepo object.'''
1460
1464
1461 # XXX what tagtype should subclasses/extensions use? Currently
1465 # XXX what tagtype should subclasses/extensions use? Currently
1462 # mq and bookmarks add tags, but do not set the tagtype at all.
1466 # mq and bookmarks add tags, but do not set the tagtype at all.
1463 # Should each extension invent its own tag type? Should there
1467 # Should each extension invent its own tag type? Should there
1464 # be one tagtype for all such "virtual" tags? Or is the status
1468 # be one tagtype for all such "virtual" tags? Or is the status
1465 # quo fine?
1469 # quo fine?
1466
1470
1467
1471
1468 # map tag name to (node, hist)
1472 # map tag name to (node, hist)
1469 alltags = tagsmod.findglobaltags(self.ui, self)
1473 alltags = tagsmod.findglobaltags(self.ui, self)
1470 # map tag name to tag type
1474 # map tag name to tag type
1471 tagtypes = dict((tag, 'global') for tag in alltags)
1475 tagtypes = dict((tag, 'global') for tag in alltags)
1472
1476
1473 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1477 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1474
1478
1475 # Build the return dicts. Have to re-encode tag names because
1479 # Build the return dicts. Have to re-encode tag names because
1476 # the tags module always uses UTF-8 (in order not to lose info
1480 # the tags module always uses UTF-8 (in order not to lose info
1477 # writing to the cache), but the rest of Mercurial wants them in
1481 # writing to the cache), but the rest of Mercurial wants them in
1478 # local encoding.
1482 # local encoding.
1479 tags = {}
1483 tags = {}
1480 for (name, (node, hist)) in alltags.iteritems():
1484 for (name, (node, hist)) in alltags.iteritems():
1481 if node != nullid:
1485 if node != nullid:
1482 tags[encoding.tolocal(name)] = node
1486 tags[encoding.tolocal(name)] = node
1483 tags['tip'] = self.changelog.tip()
1487 tags['tip'] = self.changelog.tip()
1484 tagtypes = dict([(encoding.tolocal(name), value)
1488 tagtypes = dict([(encoding.tolocal(name), value)
1485 for (name, value) in tagtypes.iteritems()])
1489 for (name, value) in tagtypes.iteritems()])
1486 return (tags, tagtypes)
1490 return (tags, tagtypes)
1487
1491
1488 def tagtype(self, tagname):
1492 def tagtype(self, tagname):
1489 '''
1493 '''
1490 return the type of the given tag. result can be:
1494 return the type of the given tag. result can be:
1491
1495
1492 'local' : a local tag
1496 'local' : a local tag
1493 'global' : a global tag
1497 'global' : a global tag
1494 None : tag does not exist
1498 None : tag does not exist
1495 '''
1499 '''
1496
1500
1497 return self._tagscache.tagtypes.get(tagname)
1501 return self._tagscache.tagtypes.get(tagname)
1498
1502
1499 def tagslist(self):
1503 def tagslist(self):
1500 '''return a list of tags ordered by revision'''
1504 '''return a list of tags ordered by revision'''
1501 if not self._tagscache.tagslist:
1505 if not self._tagscache.tagslist:
1502 l = []
1506 l = []
1503 for t, n in self.tags().iteritems():
1507 for t, n in self.tags().iteritems():
1504 l.append((self.changelog.rev(n), t, n))
1508 l.append((self.changelog.rev(n), t, n))
1505 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1509 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1506
1510
1507 return self._tagscache.tagslist
1511 return self._tagscache.tagslist
1508
1512
1509 def nodetags(self, node):
1513 def nodetags(self, node):
1510 '''return the tags associated with a node'''
1514 '''return the tags associated with a node'''
1511 if not self._tagscache.nodetagscache:
1515 if not self._tagscache.nodetagscache:
1512 nodetagscache = {}
1516 nodetagscache = {}
1513 for t, n in self._tagscache.tags.iteritems():
1517 for t, n in self._tagscache.tags.iteritems():
1514 nodetagscache.setdefault(n, []).append(t)
1518 nodetagscache.setdefault(n, []).append(t)
1515 for tags in nodetagscache.itervalues():
1519 for tags in nodetagscache.itervalues():
1516 tags.sort()
1520 tags.sort()
1517 self._tagscache.nodetagscache = nodetagscache
1521 self._tagscache.nodetagscache = nodetagscache
1518 return self._tagscache.nodetagscache.get(node, [])
1522 return self._tagscache.nodetagscache.get(node, [])
1519
1523
1520 def nodebookmarks(self, node):
1524 def nodebookmarks(self, node):
1521 """return the list of bookmarks pointing to the specified node"""
1525 """return the list of bookmarks pointing to the specified node"""
1522 return self._bookmarks.names(node)
1526 return self._bookmarks.names(node)
1523
1527
1524 def branchmap(self):
1528 def branchmap(self):
1525 '''returns a dictionary {branch: [branchheads]} with branchheads
1529 '''returns a dictionary {branch: [branchheads]} with branchheads
1526 ordered by increasing revision number'''
1530 ordered by increasing revision number'''
1527 return self._branchcaches[self]
1531 return self._branchcaches[self]
1528
1532
1529 @unfilteredmethod
1533 @unfilteredmethod
1530 def revbranchcache(self):
1534 def revbranchcache(self):
1531 if not self._revbranchcache:
1535 if not self._revbranchcache:
1532 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1536 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1533 return self._revbranchcache
1537 return self._revbranchcache
1534
1538
1535 def branchtip(self, branch, ignoremissing=False):
1539 def branchtip(self, branch, ignoremissing=False):
1536 '''return the tip node for a given branch
1540 '''return the tip node for a given branch
1537
1541
1538 If ignoremissing is True, then this method will not raise an error.
1542 If ignoremissing is True, then this method will not raise an error.
1539 This is helpful for callers that only expect None for a missing branch
1543 This is helpful for callers that only expect None for a missing branch
1540 (e.g. namespace).
1544 (e.g. namespace).
1541
1545
1542 '''
1546 '''
1543 try:
1547 try:
1544 return self.branchmap().branchtip(branch)
1548 return self.branchmap().branchtip(branch)
1545 except KeyError:
1549 except KeyError:
1546 if not ignoremissing:
1550 if not ignoremissing:
1547 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1551 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1548 else:
1552 else:
1549 pass
1553 pass
1550
1554
1551 def lookup(self, key):
1555 def lookup(self, key):
1552 return scmutil.revsymbol(self, key).node()
1556 return scmutil.revsymbol(self, key).node()
1553
1557
1554 def lookupbranch(self, key):
1558 def lookupbranch(self, key):
1555 if key in self.branchmap():
1559 if key in self.branchmap():
1556 return key
1560 return key
1557
1561
1558 return scmutil.revsymbol(self, key).branch()
1562 return scmutil.revsymbol(self, key).branch()
1559
1563
1560 def known(self, nodes):
1564 def known(self, nodes):
1561 cl = self.changelog
1565 cl = self.changelog
1562 nm = cl.nodemap
1566 nm = cl.nodemap
1563 filtered = cl.filteredrevs
1567 filtered = cl.filteredrevs
1564 result = []
1568 result = []
1565 for n in nodes:
1569 for n in nodes:
1566 r = nm.get(n)
1570 r = nm.get(n)
1567 resp = not (r is None or r in filtered)
1571 resp = not (r is None or r in filtered)
1568 result.append(resp)
1572 result.append(resp)
1569 return result
1573 return result
1570
1574
1571 def local(self):
1575 def local(self):
1572 return self
1576 return self
1573
1577
1574 def publishing(self):
1578 def publishing(self):
1575 # it's safe (and desirable) to trust the publish flag unconditionally
1579 # it's safe (and desirable) to trust the publish flag unconditionally
1576 # so that we don't finalize changes shared between users via ssh or nfs
1580 # so that we don't finalize changes shared between users via ssh or nfs
1577 return self.ui.configbool('phases', 'publish', untrusted=True)
1581 return self.ui.configbool('phases', 'publish', untrusted=True)
1578
1582
1579 def cancopy(self):
1583 def cancopy(self):
1580 # so statichttprepo's override of local() works
1584 # so statichttprepo's override of local() works
1581 if not self.local():
1585 if not self.local():
1582 return False
1586 return False
1583 if not self.publishing():
1587 if not self.publishing():
1584 return True
1588 return True
1585 # if publishing we can't copy if there is filtered content
1589 # if publishing we can't copy if there is filtered content
1586 return not self.filtered('visible').changelog.filteredrevs
1590 return not self.filtered('visible').changelog.filteredrevs
1587
1591
1588 def shared(self):
1592 def shared(self):
1589 '''the type of shared repository (None if not shared)'''
1593 '''the type of shared repository (None if not shared)'''
1590 if self.sharedpath != self.path:
1594 if self.sharedpath != self.path:
1591 return 'store'
1595 return 'store'
1592 return None
1596 return None
1593
1597
1594 def wjoin(self, f, *insidef):
1598 def wjoin(self, f, *insidef):
1595 return self.vfs.reljoin(self.root, f, *insidef)
1599 return self.vfs.reljoin(self.root, f, *insidef)
1596
1600
1597 def setparents(self, p1, p2=nullid):
1601 def setparents(self, p1, p2=nullid):
1598 with self.dirstate.parentchange():
1602 with self.dirstate.parentchange():
1599 copies = self.dirstate.setparents(p1, p2)
1603 copies = self.dirstate.setparents(p1, p2)
1600 pctx = self[p1]
1604 pctx = self[p1]
1601 if copies:
1605 if copies:
1602 # Adjust copy records, the dirstate cannot do it, it
1606 # Adjust copy records, the dirstate cannot do it, it
1603 # requires access to parents manifests. Preserve them
1607 # requires access to parents manifests. Preserve them
1604 # only for entries added to first parent.
1608 # only for entries added to first parent.
1605 for f in copies:
1609 for f in copies:
1606 if f not in pctx and copies[f] in pctx:
1610 if f not in pctx and copies[f] in pctx:
1607 self.dirstate.copy(copies[f], f)
1611 self.dirstate.copy(copies[f], f)
1608 if p2 == nullid:
1612 if p2 == nullid:
1609 for f, s in sorted(self.dirstate.copies().items()):
1613 for f, s in sorted(self.dirstate.copies().items()):
1610 if f not in pctx and s not in pctx:
1614 if f not in pctx and s not in pctx:
1611 self.dirstate.copy(None, f)
1615 self.dirstate.copy(None, f)
1612
1616
1613 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1617 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1614 """changeid must be a changeset revision, if specified.
1618 """changeid must be a changeset revision, if specified.
1615 fileid can be a file revision or node."""
1619 fileid can be a file revision or node."""
1616 return context.filectx(self, path, changeid, fileid,
1620 return context.filectx(self, path, changeid, fileid,
1617 changectx=changectx)
1621 changectx=changectx)
1618
1622
1619 def getcwd(self):
1623 def getcwd(self):
1620 return self.dirstate.getcwd()
1624 return self.dirstate.getcwd()
1621
1625
1622 def pathto(self, f, cwd=None):
1626 def pathto(self, f, cwd=None):
1623 return self.dirstate.pathto(f, cwd)
1627 return self.dirstate.pathto(f, cwd)
1624
1628
1625 def _loadfilter(self, filter):
1629 def _loadfilter(self, filter):
1626 if filter not in self._filterpats:
1630 if filter not in self._filterpats:
1627 l = []
1631 l = []
1628 for pat, cmd in self.ui.configitems(filter):
1632 for pat, cmd in self.ui.configitems(filter):
1629 if cmd == '!':
1633 if cmd == '!':
1630 continue
1634 continue
1631 mf = matchmod.match(self.root, '', [pat])
1635 mf = matchmod.match(self.root, '', [pat])
1632 fn = None
1636 fn = None
1633 params = cmd
1637 params = cmd
1634 for name, filterfn in self._datafilters.iteritems():
1638 for name, filterfn in self._datafilters.iteritems():
1635 if cmd.startswith(name):
1639 if cmd.startswith(name):
1636 fn = filterfn
1640 fn = filterfn
1637 params = cmd[len(name):].lstrip()
1641 params = cmd[len(name):].lstrip()
1638 break
1642 break
1639 if not fn:
1643 if not fn:
1640 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1644 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1641 # Wrap old filters not supporting keyword arguments
1645 # Wrap old filters not supporting keyword arguments
1642 if not pycompat.getargspec(fn)[2]:
1646 if not pycompat.getargspec(fn)[2]:
1643 oldfn = fn
1647 oldfn = fn
1644 fn = lambda s, c, **kwargs: oldfn(s, c)
1648 fn = lambda s, c, **kwargs: oldfn(s, c)
1645 l.append((mf, fn, params))
1649 l.append((mf, fn, params))
1646 self._filterpats[filter] = l
1650 self._filterpats[filter] = l
1647 return self._filterpats[filter]
1651 return self._filterpats[filter]
1648
1652
1649 def _filter(self, filterpats, filename, data):
1653 def _filter(self, filterpats, filename, data):
1650 for mf, fn, cmd in filterpats:
1654 for mf, fn, cmd in filterpats:
1651 if mf(filename):
1655 if mf(filename):
1652 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1656 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1653 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1657 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1654 break
1658 break
1655
1659
1656 return data
1660 return data
1657
1661
1658 @unfilteredpropertycache
1662 @unfilteredpropertycache
1659 def _encodefilterpats(self):
1663 def _encodefilterpats(self):
1660 return self._loadfilter('encode')
1664 return self._loadfilter('encode')
1661
1665
1662 @unfilteredpropertycache
1666 @unfilteredpropertycache
1663 def _decodefilterpats(self):
1667 def _decodefilterpats(self):
1664 return self._loadfilter('decode')
1668 return self._loadfilter('decode')
1665
1669
1666 def adddatafilter(self, name, filter):
1670 def adddatafilter(self, name, filter):
1667 self._datafilters[name] = filter
1671 self._datafilters[name] = filter
1668
1672
1669 def wread(self, filename):
1673 def wread(self, filename):
1670 if self.wvfs.islink(filename):
1674 if self.wvfs.islink(filename):
1671 data = self.wvfs.readlink(filename)
1675 data = self.wvfs.readlink(filename)
1672 else:
1676 else:
1673 data = self.wvfs.read(filename)
1677 data = self.wvfs.read(filename)
1674 return self._filter(self._encodefilterpats, filename, data)
1678 return self._filter(self._encodefilterpats, filename, data)
1675
1679
1676 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1680 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1677 """write ``data`` into ``filename`` in the working directory
1681 """write ``data`` into ``filename`` in the working directory
1678
1682
1679 This returns length of written (maybe decoded) data.
1683 This returns length of written (maybe decoded) data.
1680 """
1684 """
1681 data = self._filter(self._decodefilterpats, filename, data)
1685 data = self._filter(self._decodefilterpats, filename, data)
1682 if 'l' in flags:
1686 if 'l' in flags:
1683 self.wvfs.symlink(data, filename)
1687 self.wvfs.symlink(data, filename)
1684 else:
1688 else:
1685 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1689 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1686 **kwargs)
1690 **kwargs)
1687 if 'x' in flags:
1691 if 'x' in flags:
1688 self.wvfs.setflags(filename, False, True)
1692 self.wvfs.setflags(filename, False, True)
1689 else:
1693 else:
1690 self.wvfs.setflags(filename, False, False)
1694 self.wvfs.setflags(filename, False, False)
1691 return len(data)
1695 return len(data)
1692
1696
1693 def wwritedata(self, filename, data):
1697 def wwritedata(self, filename, data):
1694 return self._filter(self._decodefilterpats, filename, data)
1698 return self._filter(self._decodefilterpats, filename, data)
1695
1699
1696 def currenttransaction(self):
1700 def currenttransaction(self):
1697 """return the current transaction or None if non exists"""
1701 """return the current transaction or None if non exists"""
1698 if self._transref:
1702 if self._transref:
1699 tr = self._transref()
1703 tr = self._transref()
1700 else:
1704 else:
1701 tr = None
1705 tr = None
1702
1706
1703 if tr and tr.running():
1707 if tr and tr.running():
1704 return tr
1708 return tr
1705 return None
1709 return None
1706
1710
1707 def transaction(self, desc, report=None):
1711 def transaction(self, desc, report=None):
1708 if (self.ui.configbool('devel', 'all-warnings')
1712 if (self.ui.configbool('devel', 'all-warnings')
1709 or self.ui.configbool('devel', 'check-locks')):
1713 or self.ui.configbool('devel', 'check-locks')):
1710 if self._currentlock(self._lockref) is None:
1714 if self._currentlock(self._lockref) is None:
1711 raise error.ProgrammingError('transaction requires locking')
1715 raise error.ProgrammingError('transaction requires locking')
1712 tr = self.currenttransaction()
1716 tr = self.currenttransaction()
1713 if tr is not None:
1717 if tr is not None:
1714 return tr.nest(name=desc)
1718 return tr.nest(name=desc)
1715
1719
1716 # abort here if the journal already exists
1720 # abort here if the journal already exists
1717 if self.svfs.exists("journal"):
1721 if self.svfs.exists("journal"):
1718 raise error.RepoError(
1722 raise error.RepoError(
1719 _("abandoned transaction found"),
1723 _("abandoned transaction found"),
1720 hint=_("run 'hg recover' to clean up transaction"))
1724 hint=_("run 'hg recover' to clean up transaction"))
1721
1725
1722 idbase = "%.40f#%f" % (random.random(), time.time())
1726 idbase = "%.40f#%f" % (random.random(), time.time())
1723 ha = hex(hashlib.sha1(idbase).digest())
1727 ha = hex(hashlib.sha1(idbase).digest())
1724 txnid = 'TXN:' + ha
1728 txnid = 'TXN:' + ha
1725 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1729 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1726
1730
1727 self._writejournal(desc)
1731 self._writejournal(desc)
1728 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1732 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1729 if report:
1733 if report:
1730 rp = report
1734 rp = report
1731 else:
1735 else:
1732 rp = self.ui.warn
1736 rp = self.ui.warn
1733 vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
1737 vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
1734 # we must avoid cyclic reference between repo and transaction.
1738 # we must avoid cyclic reference between repo and transaction.
1735 reporef = weakref.ref(self)
1739 reporef = weakref.ref(self)
1736 # Code to track tag movement
1740 # Code to track tag movement
1737 #
1741 #
1738 # Since tags are all handled as file content, it is actually quite hard
1742 # Since tags are all handled as file content, it is actually quite hard
1739 # to track these movement from a code perspective. So we fallback to a
1743 # to track these movement from a code perspective. So we fallback to a
1740 # tracking at the repository level. One could envision to track changes
1744 # tracking at the repository level. One could envision to track changes
1741 # to the '.hgtags' file through changegroup apply but that fails to
1745 # to the '.hgtags' file through changegroup apply but that fails to
1742 # cope with case where transaction expose new heads without changegroup
1746 # cope with case where transaction expose new heads without changegroup
1743 # being involved (eg: phase movement).
1747 # being involved (eg: phase movement).
1744 #
1748 #
1745 # For now, We gate the feature behind a flag since this likely comes
1749 # For now, We gate the feature behind a flag since this likely comes
1746 # with performance impacts. The current code run more often than needed
1750 # with performance impacts. The current code run more often than needed
1747 # and do not use caches as much as it could. The current focus is on
1751 # and do not use caches as much as it could. The current focus is on
1748 # the behavior of the feature so we disable it by default. The flag
1752 # the behavior of the feature so we disable it by default. The flag
1749 # will be removed when we are happy with the performance impact.
1753 # will be removed when we are happy with the performance impact.
1750 #
1754 #
1751 # Once this feature is no longer experimental move the following
1755 # Once this feature is no longer experimental move the following
1752 # documentation to the appropriate help section:
1756 # documentation to the appropriate help section:
1753 #
1757 #
1754 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1758 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1755 # tags (new or changed or deleted tags). In addition the details of
1759 # tags (new or changed or deleted tags). In addition the details of
1756 # these changes are made available in a file at:
1760 # these changes are made available in a file at:
1757 # ``REPOROOT/.hg/changes/tags.changes``.
1761 # ``REPOROOT/.hg/changes/tags.changes``.
1758 # Make sure you check for HG_TAG_MOVED before reading that file as it
1762 # Make sure you check for HG_TAG_MOVED before reading that file as it
1759 # might exist from a previous transaction even if no tag were touched
1763 # might exist from a previous transaction even if no tag were touched
1760 # in this one. Changes are recorded in a line base format::
1764 # in this one. Changes are recorded in a line base format::
1761 #
1765 #
1762 # <action> <hex-node> <tag-name>\n
1766 # <action> <hex-node> <tag-name>\n
1763 #
1767 #
1764 # Actions are defined as follow:
1768 # Actions are defined as follow:
1765 # "-R": tag is removed,
1769 # "-R": tag is removed,
1766 # "+A": tag is added,
1770 # "+A": tag is added,
1767 # "-M": tag is moved (old value),
1771 # "-M": tag is moved (old value),
1768 # "+M": tag is moved (new value),
1772 # "+M": tag is moved (new value),
1769 tracktags = lambda x: None
1773 tracktags = lambda x: None
1770 # experimental config: experimental.hook-track-tags
1774 # experimental config: experimental.hook-track-tags
1771 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1775 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1772 if desc != 'strip' and shouldtracktags:
1776 if desc != 'strip' and shouldtracktags:
1773 oldheads = self.changelog.headrevs()
1777 oldheads = self.changelog.headrevs()
1774 def tracktags(tr2):
1778 def tracktags(tr2):
1775 repo = reporef()
1779 repo = reporef()
1776 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1780 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1777 newheads = repo.changelog.headrevs()
1781 newheads = repo.changelog.headrevs()
1778 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1782 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1779 # notes: we compare lists here.
1783 # notes: we compare lists here.
1780 # As we do it only once buiding set would not be cheaper
1784 # As we do it only once buiding set would not be cheaper
1781 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1785 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1782 if changes:
1786 if changes:
1783 tr2.hookargs['tag_moved'] = '1'
1787 tr2.hookargs['tag_moved'] = '1'
1784 with repo.vfs('changes/tags.changes', 'w',
1788 with repo.vfs('changes/tags.changes', 'w',
1785 atomictemp=True) as changesfile:
1789 atomictemp=True) as changesfile:
1786 # note: we do not register the file to the transaction
1790 # note: we do not register the file to the transaction
1787 # because we needs it to still exist on the transaction
1791 # because we needs it to still exist on the transaction
1788 # is close (for txnclose hooks)
1792 # is close (for txnclose hooks)
1789 tagsmod.writediff(changesfile, changes)
1793 tagsmod.writediff(changesfile, changes)
1790 def validate(tr2):
1794 def validate(tr2):
1791 """will run pre-closing hooks"""
1795 """will run pre-closing hooks"""
1792 # XXX the transaction API is a bit lacking here so we take a hacky
1796 # XXX the transaction API is a bit lacking here so we take a hacky
1793 # path for now
1797 # path for now
1794 #
1798 #
1795 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1799 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1796 # dict is copied before these run. In addition we needs the data
1800 # dict is copied before these run. In addition we needs the data
1797 # available to in memory hooks too.
1801 # available to in memory hooks too.
1798 #
1802 #
1799 # Moreover, we also need to make sure this runs before txnclose
1803 # Moreover, we also need to make sure this runs before txnclose
1800 # hooks and there is no "pending" mechanism that would execute
1804 # hooks and there is no "pending" mechanism that would execute
1801 # logic only if hooks are about to run.
1805 # logic only if hooks are about to run.
1802 #
1806 #
1803 # Fixing this limitation of the transaction is also needed to track
1807 # Fixing this limitation of the transaction is also needed to track
1804 # other families of changes (bookmarks, phases, obsolescence).
1808 # other families of changes (bookmarks, phases, obsolescence).
1805 #
1809 #
1806 # This will have to be fixed before we remove the experimental
1810 # This will have to be fixed before we remove the experimental
1807 # gating.
1811 # gating.
1808 tracktags(tr2)
1812 tracktags(tr2)
1809 repo = reporef()
1813 repo = reporef()
1810 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1814 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1811 scmutil.enforcesinglehead(repo, tr2, desc)
1815 scmutil.enforcesinglehead(repo, tr2, desc)
1812 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1816 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1813 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1817 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1814 args = tr.hookargs.copy()
1818 args = tr.hookargs.copy()
1815 args.update(bookmarks.preparehookargs(name, old, new))
1819 args.update(bookmarks.preparehookargs(name, old, new))
1816 repo.hook('pretxnclose-bookmark', throw=True,
1820 repo.hook('pretxnclose-bookmark', throw=True,
1817 txnname=desc,
1821 txnname=desc,
1818 **pycompat.strkwargs(args))
1822 **pycompat.strkwargs(args))
1819 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1823 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1820 cl = repo.unfiltered().changelog
1824 cl = repo.unfiltered().changelog
1821 for rev, (old, new) in tr.changes['phases'].items():
1825 for rev, (old, new) in tr.changes['phases'].items():
1822 args = tr.hookargs.copy()
1826 args = tr.hookargs.copy()
1823 node = hex(cl.node(rev))
1827 node = hex(cl.node(rev))
1824 args.update(phases.preparehookargs(node, old, new))
1828 args.update(phases.preparehookargs(node, old, new))
1825 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1829 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1826 **pycompat.strkwargs(args))
1830 **pycompat.strkwargs(args))
1827
1831
1828 repo.hook('pretxnclose', throw=True,
1832 repo.hook('pretxnclose', throw=True,
1829 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1833 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1830 def releasefn(tr, success):
1834 def releasefn(tr, success):
1831 repo = reporef()
1835 repo = reporef()
1832 if success:
1836 if success:
1833 # this should be explicitly invoked here, because
1837 # this should be explicitly invoked here, because
1834 # in-memory changes aren't written out at closing
1838 # in-memory changes aren't written out at closing
1835 # transaction, if tr.addfilegenerator (via
1839 # transaction, if tr.addfilegenerator (via
1836 # dirstate.write or so) isn't invoked while
1840 # dirstate.write or so) isn't invoked while
1837 # transaction running
1841 # transaction running
1838 repo.dirstate.write(None)
1842 repo.dirstate.write(None)
1839 else:
1843 else:
1840 # discard all changes (including ones already written
1844 # discard all changes (including ones already written
1841 # out) in this transaction
1845 # out) in this transaction
1842 narrowspec.restorebackup(self, 'journal.narrowspec')
1846 narrowspec.restorebackup(self, 'journal.narrowspec')
1843 narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
1847 narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
1844 repo.dirstate.restorebackup(None, 'journal.dirstate')
1848 repo.dirstate.restorebackup(None, 'journal.dirstate')
1845
1849
1846 repo.invalidate(clearfilecache=True)
1850 repo.invalidate(clearfilecache=True)
1847
1851
1848 tr = transaction.transaction(rp, self.svfs, vfsmap,
1852 tr = transaction.transaction(rp, self.svfs, vfsmap,
1849 "journal",
1853 "journal",
1850 "undo",
1854 "undo",
1851 aftertrans(renames),
1855 aftertrans(renames),
1852 self.store.createmode,
1856 self.store.createmode,
1853 validator=validate,
1857 validator=validate,
1854 releasefn=releasefn,
1858 releasefn=releasefn,
1855 checkambigfiles=_cachedfiles,
1859 checkambigfiles=_cachedfiles,
1856 name=desc)
1860 name=desc)
1857 tr.changes['origrepolen'] = len(self)
1861 tr.changes['origrepolen'] = len(self)
1858 tr.changes['obsmarkers'] = set()
1862 tr.changes['obsmarkers'] = set()
1859 tr.changes['phases'] = {}
1863 tr.changes['phases'] = {}
1860 tr.changes['bookmarks'] = {}
1864 tr.changes['bookmarks'] = {}
1861
1865
1862 tr.hookargs['txnid'] = txnid
1866 tr.hookargs['txnid'] = txnid
1863 # note: writing the fncache only during finalize mean that the file is
1867 # note: writing the fncache only during finalize mean that the file is
1864 # outdated when running hooks. As fncache is used for streaming clone,
1868 # outdated when running hooks. As fncache is used for streaming clone,
1865 # this is not expected to break anything that happen during the hooks.
1869 # this is not expected to break anything that happen during the hooks.
1866 tr.addfinalize('flush-fncache', self.store.write)
1870 tr.addfinalize('flush-fncache', self.store.write)
1867 def txnclosehook(tr2):
1871 def txnclosehook(tr2):
1868 """To be run if transaction is successful, will schedule a hook run
1872 """To be run if transaction is successful, will schedule a hook run
1869 """
1873 """
1870 # Don't reference tr2 in hook() so we don't hold a reference.
1874 # Don't reference tr2 in hook() so we don't hold a reference.
1871 # This reduces memory consumption when there are multiple
1875 # This reduces memory consumption when there are multiple
1872 # transactions per lock. This can likely go away if issue5045
1876 # transactions per lock. This can likely go away if issue5045
1873 # fixes the function accumulation.
1877 # fixes the function accumulation.
1874 hookargs = tr2.hookargs
1878 hookargs = tr2.hookargs
1875
1879
1876 def hookfunc():
1880 def hookfunc():
1877 repo = reporef()
1881 repo = reporef()
1878 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1882 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1879 bmchanges = sorted(tr.changes['bookmarks'].items())
1883 bmchanges = sorted(tr.changes['bookmarks'].items())
1880 for name, (old, new) in bmchanges:
1884 for name, (old, new) in bmchanges:
1881 args = tr.hookargs.copy()
1885 args = tr.hookargs.copy()
1882 args.update(bookmarks.preparehookargs(name, old, new))
1886 args.update(bookmarks.preparehookargs(name, old, new))
1883 repo.hook('txnclose-bookmark', throw=False,
1887 repo.hook('txnclose-bookmark', throw=False,
1884 txnname=desc, **pycompat.strkwargs(args))
1888 txnname=desc, **pycompat.strkwargs(args))
1885
1889
1886 if hook.hashook(repo.ui, 'txnclose-phase'):
1890 if hook.hashook(repo.ui, 'txnclose-phase'):
1887 cl = repo.unfiltered().changelog
1891 cl = repo.unfiltered().changelog
1888 phasemv = sorted(tr.changes['phases'].items())
1892 phasemv = sorted(tr.changes['phases'].items())
1889 for rev, (old, new) in phasemv:
1893 for rev, (old, new) in phasemv:
1890 args = tr.hookargs.copy()
1894 args = tr.hookargs.copy()
1891 node = hex(cl.node(rev))
1895 node = hex(cl.node(rev))
1892 args.update(phases.preparehookargs(node, old, new))
1896 args.update(phases.preparehookargs(node, old, new))
1893 repo.hook('txnclose-phase', throw=False, txnname=desc,
1897 repo.hook('txnclose-phase', throw=False, txnname=desc,
1894 **pycompat.strkwargs(args))
1898 **pycompat.strkwargs(args))
1895
1899
1896 repo.hook('txnclose', throw=False, txnname=desc,
1900 repo.hook('txnclose', throw=False, txnname=desc,
1897 **pycompat.strkwargs(hookargs))
1901 **pycompat.strkwargs(hookargs))
1898 reporef()._afterlock(hookfunc)
1902 reporef()._afterlock(hookfunc)
1899 tr.addfinalize('txnclose-hook', txnclosehook)
1903 tr.addfinalize('txnclose-hook', txnclosehook)
1900 # Include a leading "-" to make it happen before the transaction summary
1904 # Include a leading "-" to make it happen before the transaction summary
1901 # reports registered via scmutil.registersummarycallback() whose names
1905 # reports registered via scmutil.registersummarycallback() whose names
1902 # are 00-txnreport etc. That way, the caches will be warm when the
1906 # are 00-txnreport etc. That way, the caches will be warm when the
1903 # callbacks run.
1907 # callbacks run.
1904 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1908 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1905 def txnaborthook(tr2):
1909 def txnaborthook(tr2):
1906 """To be run if transaction is aborted
1910 """To be run if transaction is aborted
1907 """
1911 """
1908 reporef().hook('txnabort', throw=False, txnname=desc,
1912 reporef().hook('txnabort', throw=False, txnname=desc,
1909 **pycompat.strkwargs(tr2.hookargs))
1913 **pycompat.strkwargs(tr2.hookargs))
1910 tr.addabort('txnabort-hook', txnaborthook)
1914 tr.addabort('txnabort-hook', txnaborthook)
1911 # avoid eager cache invalidation. in-memory data should be identical
1915 # avoid eager cache invalidation. in-memory data should be identical
1912 # to stored data if transaction has no error.
1916 # to stored data if transaction has no error.
1913 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1917 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1914 self._transref = weakref.ref(tr)
1918 self._transref = weakref.ref(tr)
1915 scmutil.registersummarycallback(self, tr, desc)
1919 scmutil.registersummarycallback(self, tr, desc)
1916 return tr
1920 return tr
1917
1921
1918 def _journalfiles(self):
1922 def _journalfiles(self):
1919 return ((self.svfs, 'journal'),
1923 return ((self.svfs, 'journal'),
1920 (self.svfs, 'journal.narrowspec'),
1924 (self.svfs, 'journal.narrowspec'),
1921 (self.vfs, 'journal.narrowspec.dirstate'),
1925 (self.vfs, 'journal.narrowspec.dirstate'),
1922 (self.vfs, 'journal.dirstate'),
1926 (self.vfs, 'journal.dirstate'),
1923 (self.vfs, 'journal.branch'),
1927 (self.vfs, 'journal.branch'),
1924 (self.vfs, 'journal.desc'),
1928 (self.vfs, 'journal.desc'),
1925 (self.vfs, 'journal.bookmarks'),
1929 (self.vfs, 'journal.bookmarks'),
1926 (self.svfs, 'journal.phaseroots'))
1930 (self.svfs, 'journal.phaseroots'))
1927
1931
1928 def undofiles(self):
1932 def undofiles(self):
1929 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1933 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1930
1934
1931 @unfilteredmethod
1935 @unfilteredmethod
1932 def _writejournal(self, desc):
1936 def _writejournal(self, desc):
1933 self.dirstate.savebackup(None, 'journal.dirstate')
1937 self.dirstate.savebackup(None, 'journal.dirstate')
1934 narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
1938 narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
1935 narrowspec.savebackup(self, 'journal.narrowspec')
1939 narrowspec.savebackup(self, 'journal.narrowspec')
1936 self.vfs.write("journal.branch",
1940 self.vfs.write("journal.branch",
1937 encoding.fromlocal(self.dirstate.branch()))
1941 encoding.fromlocal(self.dirstate.branch()))
1938 self.vfs.write("journal.desc",
1942 self.vfs.write("journal.desc",
1939 "%d\n%s\n" % (len(self), desc))
1943 "%d\n%s\n" % (len(self), desc))
1940 self.vfs.write("journal.bookmarks",
1944 self.vfs.write("journal.bookmarks",
1941 self.vfs.tryread("bookmarks"))
1945 self.vfs.tryread("bookmarks"))
1942 self.svfs.write("journal.phaseroots",
1946 self.svfs.write("journal.phaseroots",
1943 self.svfs.tryread("phaseroots"))
1947 self.svfs.tryread("phaseroots"))
1944
1948
1945 def recover(self):
1949 def recover(self):
1946 with self.lock():
1950 with self.lock():
1947 if self.svfs.exists("journal"):
1951 if self.svfs.exists("journal"):
1948 self.ui.status(_("rolling back interrupted transaction\n"))
1952 self.ui.status(_("rolling back interrupted transaction\n"))
1949 vfsmap = {'': self.svfs,
1953 vfsmap = {'': self.svfs,
1950 'plain': self.vfs,}
1954 'plain': self.vfs,}
1951 transaction.rollback(self.svfs, vfsmap, "journal",
1955 transaction.rollback(self.svfs, vfsmap, "journal",
1952 self.ui.warn,
1956 self.ui.warn,
1953 checkambigfiles=_cachedfiles)
1957 checkambigfiles=_cachedfiles)
1954 self.invalidate()
1958 self.invalidate()
1955 return True
1959 return True
1956 else:
1960 else:
1957 self.ui.warn(_("no interrupted transaction available\n"))
1961 self.ui.warn(_("no interrupted transaction available\n"))
1958 return False
1962 return False
1959
1963
1960 def rollback(self, dryrun=False, force=False):
1964 def rollback(self, dryrun=False, force=False):
1961 wlock = lock = dsguard = None
1965 wlock = lock = dsguard = None
1962 try:
1966 try:
1963 wlock = self.wlock()
1967 wlock = self.wlock()
1964 lock = self.lock()
1968 lock = self.lock()
1965 if self.svfs.exists("undo"):
1969 if self.svfs.exists("undo"):
1966 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1970 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1967
1971
1968 return self._rollback(dryrun, force, dsguard)
1972 return self._rollback(dryrun, force, dsguard)
1969 else:
1973 else:
1970 self.ui.warn(_("no rollback information available\n"))
1974 self.ui.warn(_("no rollback information available\n"))
1971 return 1
1975 return 1
1972 finally:
1976 finally:
1973 release(dsguard, lock, wlock)
1977 release(dsguard, lock, wlock)
1974
1978
1975 @unfilteredmethod # Until we get smarter cache management
1979 @unfilteredmethod # Until we get smarter cache management
1976 def _rollback(self, dryrun, force, dsguard):
1980 def _rollback(self, dryrun, force, dsguard):
1977 ui = self.ui
1981 ui = self.ui
1978 try:
1982 try:
1979 args = self.vfs.read('undo.desc').splitlines()
1983 args = self.vfs.read('undo.desc').splitlines()
1980 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1984 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1981 if len(args) >= 3:
1985 if len(args) >= 3:
1982 detail = args[2]
1986 detail = args[2]
1983 oldtip = oldlen - 1
1987 oldtip = oldlen - 1
1984
1988
1985 if detail and ui.verbose:
1989 if detail and ui.verbose:
1986 msg = (_('repository tip rolled back to revision %d'
1990 msg = (_('repository tip rolled back to revision %d'
1987 ' (undo %s: %s)\n')
1991 ' (undo %s: %s)\n')
1988 % (oldtip, desc, detail))
1992 % (oldtip, desc, detail))
1989 else:
1993 else:
1990 msg = (_('repository tip rolled back to revision %d'
1994 msg = (_('repository tip rolled back to revision %d'
1991 ' (undo %s)\n')
1995 ' (undo %s)\n')
1992 % (oldtip, desc))
1996 % (oldtip, desc))
1993 except IOError:
1997 except IOError:
1994 msg = _('rolling back unknown transaction\n')
1998 msg = _('rolling back unknown transaction\n')
1995 desc = None
1999 desc = None
1996
2000
1997 if not force and self['.'] != self['tip'] and desc == 'commit':
2001 if not force and self['.'] != self['tip'] and desc == 'commit':
1998 raise error.Abort(
2002 raise error.Abort(
1999 _('rollback of last commit while not checked out '
2003 _('rollback of last commit while not checked out '
2000 'may lose data'), hint=_('use -f to force'))
2004 'may lose data'), hint=_('use -f to force'))
2001
2005
2002 ui.status(msg)
2006 ui.status(msg)
2003 if dryrun:
2007 if dryrun:
2004 return 0
2008 return 0
2005
2009
2006 parents = self.dirstate.parents()
2010 parents = self.dirstate.parents()
2007 self.destroying()
2011 self.destroying()
2008 vfsmap = {'plain': self.vfs, '': self.svfs}
2012 vfsmap = {'plain': self.vfs, '': self.svfs}
2009 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
2013 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
2010 checkambigfiles=_cachedfiles)
2014 checkambigfiles=_cachedfiles)
2011 if self.vfs.exists('undo.bookmarks'):
2015 if self.vfs.exists('undo.bookmarks'):
2012 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
2016 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
2013 if self.svfs.exists('undo.phaseroots'):
2017 if self.svfs.exists('undo.phaseroots'):
2014 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
2018 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
2015 self.invalidate()
2019 self.invalidate()
2016
2020
2017 parentgone = any(p not in self.changelog.nodemap for p in parents)
2021 parentgone = any(p not in self.changelog.nodemap for p in parents)
2018 if parentgone:
2022 if parentgone:
2019 # prevent dirstateguard from overwriting already restored one
2023 # prevent dirstateguard from overwriting already restored one
2020 dsguard.close()
2024 dsguard.close()
2021
2025
2022 narrowspec.restorebackup(self, 'undo.narrowspec')
2026 narrowspec.restorebackup(self, 'undo.narrowspec')
2023 narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
2027 narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
2024 self.dirstate.restorebackup(None, 'undo.dirstate')
2028 self.dirstate.restorebackup(None, 'undo.dirstate')
2025 try:
2029 try:
2026 branch = self.vfs.read('undo.branch')
2030 branch = self.vfs.read('undo.branch')
2027 self.dirstate.setbranch(encoding.tolocal(branch))
2031 self.dirstate.setbranch(encoding.tolocal(branch))
2028 except IOError:
2032 except IOError:
2029 ui.warn(_('named branch could not be reset: '
2033 ui.warn(_('named branch could not be reset: '
2030 'current branch is still \'%s\'\n')
2034 'current branch is still \'%s\'\n')
2031 % self.dirstate.branch())
2035 % self.dirstate.branch())
2032
2036
2033 parents = tuple([p.rev() for p in self[None].parents()])
2037 parents = tuple([p.rev() for p in self[None].parents()])
2034 if len(parents) > 1:
2038 if len(parents) > 1:
2035 ui.status(_('working directory now based on '
2039 ui.status(_('working directory now based on '
2036 'revisions %d and %d\n') % parents)
2040 'revisions %d and %d\n') % parents)
2037 else:
2041 else:
2038 ui.status(_('working directory now based on '
2042 ui.status(_('working directory now based on '
2039 'revision %d\n') % parents)
2043 'revision %d\n') % parents)
2040 mergemod.mergestate.clean(self, self['.'].node())
2044 mergemod.mergestate.clean(self, self['.'].node())
2041
2045
2042 # TODO: if we know which new heads may result from this rollback, pass
2046 # TODO: if we know which new heads may result from this rollback, pass
2043 # them to destroy(), which will prevent the branchhead cache from being
2047 # them to destroy(), which will prevent the branchhead cache from being
2044 # invalidated.
2048 # invalidated.
2045 self.destroyed()
2049 self.destroyed()
2046 return 0
2050 return 0
2047
2051
2048 def _buildcacheupdater(self, newtransaction):
2052 def _buildcacheupdater(self, newtransaction):
2049 """called during transaction to build the callback updating cache
2053 """called during transaction to build the callback updating cache
2050
2054
2051 Lives on the repository to help extension who might want to augment
2055 Lives on the repository to help extension who might want to augment
2052 this logic. For this purpose, the created transaction is passed to the
2056 this logic. For this purpose, the created transaction is passed to the
2053 method.
2057 method.
2054 """
2058 """
2055 # we must avoid cyclic reference between repo and transaction.
2059 # we must avoid cyclic reference between repo and transaction.
2056 reporef = weakref.ref(self)
2060 reporef = weakref.ref(self)
2057 def updater(tr):
2061 def updater(tr):
2058 repo = reporef()
2062 repo = reporef()
2059 repo.updatecaches(tr)
2063 repo.updatecaches(tr)
2060 return updater
2064 return updater
2061
2065
2062 @unfilteredmethod
2066 @unfilteredmethod
2063 def updatecaches(self, tr=None, full=False):
2067 def updatecaches(self, tr=None, full=False):
2064 """warm appropriate caches
2068 """warm appropriate caches
2065
2069
2066 If this function is called after a transaction closed. The transaction
2070 If this function is called after a transaction closed. The transaction
2067 will be available in the 'tr' argument. This can be used to selectively
2071 will be available in the 'tr' argument. This can be used to selectively
2068 update caches relevant to the changes in that transaction.
2072 update caches relevant to the changes in that transaction.
2069
2073
2070 If 'full' is set, make sure all caches the function knows about have
2074 If 'full' is set, make sure all caches the function knows about have
2071 up-to-date data. Even the ones usually loaded more lazily.
2075 up-to-date data. Even the ones usually loaded more lazily.
2072 """
2076 """
2073 if tr is not None and tr.hookargs.get('source') == 'strip':
2077 if tr is not None and tr.hookargs.get('source') == 'strip':
2074 # During strip, many caches are invalid but
2078 # During strip, many caches are invalid but
2075 # later call to `destroyed` will refresh them.
2079 # later call to `destroyed` will refresh them.
2076 return
2080 return
2077
2081
2078 if tr is None or tr.changes['origrepolen'] < len(self):
2082 if tr is None or tr.changes['origrepolen'] < len(self):
2079 # accessing the 'ser ved' branchmap should refresh all the others,
2083 # accessing the 'ser ved' branchmap should refresh all the others,
2080 self.ui.debug('updating the branch cache\n')
2084 self.ui.debug('updating the branch cache\n')
2081 self.filtered('served').branchmap()
2085 self.filtered('served').branchmap()
2082
2086
2083 if full:
2087 if full:
2084 rbc = self.revbranchcache()
2088 rbc = self.revbranchcache()
2085 for r in self.changelog:
2089 for r in self.changelog:
2086 rbc.branchinfo(r)
2090 rbc.branchinfo(r)
2087 rbc.write()
2091 rbc.write()
2088
2092
2089 # ensure the working copy parents are in the manifestfulltextcache
2093 # ensure the working copy parents are in the manifestfulltextcache
2090 for ctx in self['.'].parents():
2094 for ctx in self['.'].parents():
2091 ctx.manifest() # accessing the manifest is enough
2095 ctx.manifest() # accessing the manifest is enough
2092
2096
2093 def invalidatecaches(self):
2097 def invalidatecaches(self):
2094
2098
2095 if r'_tagscache' in vars(self):
2099 if r'_tagscache' in vars(self):
2096 # can't use delattr on proxy
2100 # can't use delattr on proxy
2097 del self.__dict__[r'_tagscache']
2101 del self.__dict__[r'_tagscache']
2098
2102
2099 self._branchcaches.clear()
2103 self._branchcaches.clear()
2100 self.invalidatevolatilesets()
2104 self.invalidatevolatilesets()
2101 self._sparsesignaturecache.clear()
2105 self._sparsesignaturecache.clear()
2102
2106
2103 def invalidatevolatilesets(self):
2107 def invalidatevolatilesets(self):
2104 self.filteredrevcache.clear()
2108 self.filteredrevcache.clear()
2105 obsolete.clearobscaches(self)
2109 obsolete.clearobscaches(self)
2106
2110
2107 def invalidatedirstate(self):
2111 def invalidatedirstate(self):
2108 '''Invalidates the dirstate, causing the next call to dirstate
2112 '''Invalidates the dirstate, causing the next call to dirstate
2109 to check if it was modified since the last time it was read,
2113 to check if it was modified since the last time it was read,
2110 rereading it if it has.
2114 rereading it if it has.
2111
2115
2112 This is different to dirstate.invalidate() that it doesn't always
2116 This is different to dirstate.invalidate() that it doesn't always
2113 rereads the dirstate. Use dirstate.invalidate() if you want to
2117 rereads the dirstate. Use dirstate.invalidate() if you want to
2114 explicitly read the dirstate again (i.e. restoring it to a previous
2118 explicitly read the dirstate again (i.e. restoring it to a previous
2115 known good state).'''
2119 known good state).'''
2116 if hasunfilteredcache(self, r'dirstate'):
2120 if hasunfilteredcache(self, r'dirstate'):
2117 for k in self.dirstate._filecache:
2121 for k in self.dirstate._filecache:
2118 try:
2122 try:
2119 delattr(self.dirstate, k)
2123 delattr(self.dirstate, k)
2120 except AttributeError:
2124 except AttributeError:
2121 pass
2125 pass
2122 delattr(self.unfiltered(), r'dirstate')
2126 delattr(self.unfiltered(), r'dirstate')
2123
2127
2124 def invalidate(self, clearfilecache=False):
2128 def invalidate(self, clearfilecache=False):
2125 '''Invalidates both store and non-store parts other than dirstate
2129 '''Invalidates both store and non-store parts other than dirstate
2126
2130
2127 If a transaction is running, invalidation of store is omitted,
2131 If a transaction is running, invalidation of store is omitted,
2128 because discarding in-memory changes might cause inconsistency
2132 because discarding in-memory changes might cause inconsistency
2129 (e.g. incomplete fncache causes unintentional failure, but
2133 (e.g. incomplete fncache causes unintentional failure, but
2130 redundant one doesn't).
2134 redundant one doesn't).
2131 '''
2135 '''
2132 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2136 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2133 for k in list(self._filecache.keys()):
2137 for k in list(self._filecache.keys()):
2134 # dirstate is invalidated separately in invalidatedirstate()
2138 # dirstate is invalidated separately in invalidatedirstate()
2135 if k == 'dirstate':
2139 if k == 'dirstate':
2136 continue
2140 continue
2137 if (k == 'changelog' and
2141 if (k == 'changelog' and
2138 self.currenttransaction() and
2142 self.currenttransaction() and
2139 self.changelog._delayed):
2143 self.changelog._delayed):
2140 # The changelog object may store unwritten revisions. We don't
2144 # The changelog object may store unwritten revisions. We don't
2141 # want to lose them.
2145 # want to lose them.
2142 # TODO: Solve the problem instead of working around it.
2146 # TODO: Solve the problem instead of working around it.
2143 continue
2147 continue
2144
2148
2145 if clearfilecache:
2149 if clearfilecache:
2146 del self._filecache[k]
2150 del self._filecache[k]
2147 try:
2151 try:
2148 delattr(unfiltered, k)
2152 delattr(unfiltered, k)
2149 except AttributeError:
2153 except AttributeError:
2150 pass
2154 pass
2151 self.invalidatecaches()
2155 self.invalidatecaches()
2152 if not self.currenttransaction():
2156 if not self.currenttransaction():
2153 # TODO: Changing contents of store outside transaction
2157 # TODO: Changing contents of store outside transaction
2154 # causes inconsistency. We should make in-memory store
2158 # causes inconsistency. We should make in-memory store
2155 # changes detectable, and abort if changed.
2159 # changes detectable, and abort if changed.
2156 self.store.invalidatecaches()
2160 self.store.invalidatecaches()
2157
2161
2158 def invalidateall(self):
2162 def invalidateall(self):
2159 '''Fully invalidates both store and non-store parts, causing the
2163 '''Fully invalidates both store and non-store parts, causing the
2160 subsequent operation to reread any outside changes.'''
2164 subsequent operation to reread any outside changes.'''
2161 # extension should hook this to invalidate its caches
2165 # extension should hook this to invalidate its caches
2162 self.invalidate()
2166 self.invalidate()
2163 self.invalidatedirstate()
2167 self.invalidatedirstate()
2164
2168
2165 @unfilteredmethod
2169 @unfilteredmethod
2166 def _refreshfilecachestats(self, tr):
2170 def _refreshfilecachestats(self, tr):
2167 """Reload stats of cached files so that they are flagged as valid"""
2171 """Reload stats of cached files so that they are flagged as valid"""
2168 for k, ce in self._filecache.items():
2172 for k, ce in self._filecache.items():
2169 k = pycompat.sysstr(k)
2173 k = pycompat.sysstr(k)
2170 if k == r'dirstate' or k not in self.__dict__:
2174 if k == r'dirstate' or k not in self.__dict__:
2171 continue
2175 continue
2172 ce.refresh()
2176 ce.refresh()
2173
2177
2174 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2178 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2175 inheritchecker=None, parentenvvar=None):
2179 inheritchecker=None, parentenvvar=None):
2176 parentlock = None
2180 parentlock = None
2177 # the contents of parentenvvar are used by the underlying lock to
2181 # the contents of parentenvvar are used by the underlying lock to
2178 # determine whether it can be inherited
2182 # determine whether it can be inherited
2179 if parentenvvar is not None:
2183 if parentenvvar is not None:
2180 parentlock = encoding.environ.get(parentenvvar)
2184 parentlock = encoding.environ.get(parentenvvar)
2181
2185
2182 timeout = 0
2186 timeout = 0
2183 warntimeout = 0
2187 warntimeout = 0
2184 if wait:
2188 if wait:
2185 timeout = self.ui.configint("ui", "timeout")
2189 timeout = self.ui.configint("ui", "timeout")
2186 warntimeout = self.ui.configint("ui", "timeout.warn")
2190 warntimeout = self.ui.configint("ui", "timeout.warn")
2187 # internal config: ui.signal-safe-lock
2191 # internal config: ui.signal-safe-lock
2188 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2192 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2189
2193
2190 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2194 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2191 releasefn=releasefn,
2195 releasefn=releasefn,
2192 acquirefn=acquirefn, desc=desc,
2196 acquirefn=acquirefn, desc=desc,
2193 inheritchecker=inheritchecker,
2197 inheritchecker=inheritchecker,
2194 parentlock=parentlock,
2198 parentlock=parentlock,
2195 signalsafe=signalsafe)
2199 signalsafe=signalsafe)
2196 return l
2200 return l
2197
2201
2198 def _afterlock(self, callback):
2202 def _afterlock(self, callback):
2199 """add a callback to be run when the repository is fully unlocked
2203 """add a callback to be run when the repository is fully unlocked
2200
2204
2201 The callback will be executed when the outermost lock is released
2205 The callback will be executed when the outermost lock is released
2202 (with wlock being higher level than 'lock')."""
2206 (with wlock being higher level than 'lock')."""
2203 for ref in (self._wlockref, self._lockref):
2207 for ref in (self._wlockref, self._lockref):
2204 l = ref and ref()
2208 l = ref and ref()
2205 if l and l.held:
2209 if l and l.held:
2206 l.postrelease.append(callback)
2210 l.postrelease.append(callback)
2207 break
2211 break
2208 else: # no lock have been found.
2212 else: # no lock have been found.
2209 callback()
2213 callback()
2210
2214
2211 def lock(self, wait=True):
2215 def lock(self, wait=True):
2212 '''Lock the repository store (.hg/store) and return a weak reference
2216 '''Lock the repository store (.hg/store) and return a weak reference
2213 to the lock. Use this before modifying the store (e.g. committing or
2217 to the lock. Use this before modifying the store (e.g. committing or
2214 stripping). If you are opening a transaction, get a lock as well.)
2218 stripping). If you are opening a transaction, get a lock as well.)
2215
2219
2216 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2220 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2217 'wlock' first to avoid a dead-lock hazard.'''
2221 'wlock' first to avoid a dead-lock hazard.'''
2218 l = self._currentlock(self._lockref)
2222 l = self._currentlock(self._lockref)
2219 if l is not None:
2223 if l is not None:
2220 l.lock()
2224 l.lock()
2221 return l
2225 return l
2222
2226
2223 l = self._lock(self.svfs, "lock", wait, None,
2227 l = self._lock(self.svfs, "lock", wait, None,
2224 self.invalidate, _('repository %s') % self.origroot)
2228 self.invalidate, _('repository %s') % self.origroot)
2225 self._lockref = weakref.ref(l)
2229 self._lockref = weakref.ref(l)
2226 return l
2230 return l
2227
2231
2228 def _wlockchecktransaction(self):
2232 def _wlockchecktransaction(self):
2229 if self.currenttransaction() is not None:
2233 if self.currenttransaction() is not None:
2230 raise error.LockInheritanceContractViolation(
2234 raise error.LockInheritanceContractViolation(
2231 'wlock cannot be inherited in the middle of a transaction')
2235 'wlock cannot be inherited in the middle of a transaction')
2232
2236
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # reentrant acquisition: bump the count on the already-held lock
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            # Runs when the lock is finally released: if a dirstate parent
            # change is still pending, drop the in-memory dirstate (it will
            # be reloaded from disk); otherwise flush it out.
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            # resync the filecache entry with what is now on disk
            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
2268
2272
2269 def _currentlock(self, lockref):
2273 def _currentlock(self, lockref):
2270 """Returns the lock if it's held, or None if it's not."""
2274 """Returns the lock if it's held, or None if it's not."""
2271 if lockref is None:
2275 if lockref is None:
2272 return None
2276 return None
2273 l = lockref()
2277 l = lockref()
2274 if l is None or not l.held:
2278 if l is None or not l.held:
2275 return None
2279 return None
2276 return l
2280 return l
2277
2281
2278 def currentwlock(self):
2282 def currentwlock(self):
2279 """Returns the wlock if it's held, or None if it's not."""
2283 """Returns the wlock if it's held, or None if it's not."""
2280 return self._currentlock(self._wlockref)
2284 return self._currentlock(self._wlockref)
2281
2285
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: the file context to commit (or a memfilectx-like object)
        manifest1, manifest2: the manifests of the two commit parents
        linkrev: revision number the new filelog entry will link to
        tr: the running transaction (a weak proxy)
        changelist: list of changed file names, mutated in place

        Returns the file node to record in the new manifest.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # the filectx already points at a stored filelog revision; if it
            # matches one of the parents we can reuse it outright
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            # no first parent: promote the second parent, if any
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
2368
2372
2369 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2373 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2370 """check for commit arguments that aren't committable"""
2374 """check for commit arguments that aren't committable"""
2371 if match.isexact() or match.prefix():
2375 if match.isexact() or match.prefix():
2372 matched = set(status.modified + status.added + status.removed)
2376 matched = set(status.modified + status.added + status.removed)
2373
2377
2374 for f in match.files():
2378 for f in match.files():
2375 f = self.dirstate.normalize(f)
2379 f = self.dirstate.normalize(f)
2376 if f == '.' or f in matched or f in wctx.substate:
2380 if f == '.' or f in matched or f in wctx.substate:
2377 continue
2381 continue
2378 if f in status.deleted:
2382 if f in status.deleted:
2379 fail(f, _('file not found!'))
2383 fail(f, _('file not found!'))
2380 if f in vdirs: # visited directory
2384 if f in vdirs: # visited directory
2381 d = f + '/'
2385 d = f + '/'
2382 for mf in matched:
2386 for mf in matched:
2383 if mf.startswith(d):
2387 if mf.startswith(d):
2384 break
2388 break
2385 else:
2389 else:
2386 fail(f, _("no match under directory!"))
2390 fail(f, _("no match under directory!"))
2387 elif f not in self.dirstate:
2391 elif f not in self.dirstate:
2388 fail(f, _("file not tracked!"))
2392 fail(f, _("file not tracked!"))
2389
2393
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the node of the new changeset, or None if there was
        nothing to commit (and empty commits are not allowed).
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            # report a bad explicit file/pattern by aborting the commit
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   uipathfn(subrepoutil.subrelpath(sub)))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                with self.transaction('commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except: # re-raises
                # point the user at the saved message before propagating
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
                # hack for commands that use a temporary commit (eg: histedit)
                # temporary commit got stripped before hook release
                if self.changelog.hasnode(ret):
                    self.hook("commit", node=node, parent1=parent1,
                              parent2=parent2)
            # run the "commit" hook only once the locks are fully released
            self._afterlock(commithook)
            return ret
2495
2499
2496 @unfilteredmethod
2500 @unfilteredmethod
2497 def commitctx(self, ctx, error=False):
2501 def commitctx(self, ctx, error=False):
2498 """Add a new revision to current repository.
2502 """Add a new revision to current repository.
2499 Revision information is passed via the context argument.
2503 Revision information is passed via the context argument.
2500
2504
2501 ctx.files() should list all files involved in this commit, i.e.
2505 ctx.files() should list all files involved in this commit, i.e.
2502 modified/added/removed files. On merge, it may be wider than the
2506 modified/added/removed files. On merge, it may be wider than the
2503 ctx.files() to be committed, since any file nodes derived directly
2507 ctx.files() to be committed, since any file nodes derived directly
2504 from p1 or p2 are excluded from the committed ctx.files().
2508 from p1 or p2 are excluded from the committed ctx.files().
2505 """
2509 """
2506
2510
2507 p1, p2 = ctx.p1(), ctx.p2()
2511 p1, p2 = ctx.p1(), ctx.p2()
2508 user = ctx.user()
2512 user = ctx.user()
2509
2513
2510 with self.lock(), self.transaction("commit") as tr:
2514 with self.lock(), self.transaction("commit") as tr:
2511 trp = weakref.proxy(tr)
2515 trp = weakref.proxy(tr)
2512
2516
2513 if ctx.manifestnode():
2517 if ctx.manifestnode():
2514 # reuse an existing manifest revision
2518 # reuse an existing manifest revision
2515 self.ui.debug('reusing known manifest\n')
2519 self.ui.debug('reusing known manifest\n')
2516 mn = ctx.manifestnode()
2520 mn = ctx.manifestnode()
2517 files = ctx.files()
2521 files = ctx.files()
2518 elif ctx.files():
2522 elif ctx.files():
2519 m1ctx = p1.manifestctx()
2523 m1ctx = p1.manifestctx()
2520 m2ctx = p2.manifestctx()
2524 m2ctx = p2.manifestctx()
2521 mctx = m1ctx.copy()
2525 mctx = m1ctx.copy()
2522
2526
2523 m = mctx.read()
2527 m = mctx.read()
2524 m1 = m1ctx.read()
2528 m1 = m1ctx.read()
2525 m2 = m2ctx.read()
2529 m2 = m2ctx.read()
2526
2530
2527 # check in files
2531 # check in files
2528 added = []
2532 added = []
2529 changed = []
2533 changed = []
2530 removed = list(ctx.removed())
2534 removed = list(ctx.removed())
2531 linkrev = len(self)
2535 linkrev = len(self)
2532 self.ui.note(_("committing files:\n"))
2536 self.ui.note(_("committing files:\n"))
2533 uipathfn = scmutil.getuipathfn(self)
2537 uipathfn = scmutil.getuipathfn(self)
2534 for f in sorted(ctx.modified() + ctx.added()):
2538 for f in sorted(ctx.modified() + ctx.added()):
2535 self.ui.note(uipathfn(f) + "\n")
2539 self.ui.note(uipathfn(f) + "\n")
2536 try:
2540 try:
2537 fctx = ctx[f]
2541 fctx = ctx[f]
2538 if fctx is None:
2542 if fctx is None:
2539 removed.append(f)
2543 removed.append(f)
2540 else:
2544 else:
2541 added.append(f)
2545 added.append(f)
2542 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2546 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2543 trp, changed)
2547 trp, changed)
2544 m.setflag(f, fctx.flags())
2548 m.setflag(f, fctx.flags())
2545 except OSError:
2549 except OSError:
2546 self.ui.warn(_("trouble committing %s!\n") %
2550 self.ui.warn(_("trouble committing %s!\n") %
2547 uipathfn(f))
2551 uipathfn(f))
2548 raise
2552 raise
2549 except IOError as inst:
2553 except IOError as inst:
2550 errcode = getattr(inst, 'errno', errno.ENOENT)
2554 errcode = getattr(inst, 'errno', errno.ENOENT)
2551 if error or errcode and errcode != errno.ENOENT:
2555 if error or errcode and errcode != errno.ENOENT:
2552 self.ui.warn(_("trouble committing %s!\n") %
2556 self.ui.warn(_("trouble committing %s!\n") %
2553 uipathfn(f))
2557 uipathfn(f))
2554 raise
2558 raise
2555
2559
2556 # update manifest
2560 # update manifest
2557 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2561 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2558 drop = [f for f in removed if f in m]
2562 drop = [f for f in removed if f in m]
2559 for f in drop:
2563 for f in drop:
2560 del m[f]
2564 del m[f]
2561 files = changed + removed
2565 files = changed + removed
2562 md = None
2566 md = None
2563 if not files:
2567 if not files:
2564 # if no "files" actually changed in terms of the changelog,
2568 # if no "files" actually changed in terms of the changelog,
2565 # try hard to detect unmodified manifest entry so that the
2569 # try hard to detect unmodified manifest entry so that the
2566 # exact same commit can be reproduced later on convert.
2570 # exact same commit can be reproduced later on convert.
2567 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2571 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2568 if not files and md:
2572 if not files and md:
2569 self.ui.debug('not reusing manifest (no file change in '
2573 self.ui.debug('not reusing manifest (no file change in '
2570 'changelog, but manifest differs)\n')
2574 'changelog, but manifest differs)\n')
2571 if files or md:
2575 if files or md:
2572 self.ui.note(_("committing manifest\n"))
2576 self.ui.note(_("committing manifest\n"))
2573 # we're using narrowmatch here since it's already applied at
2577 # we're using narrowmatch here since it's already applied at
2574 # other stages (such as dirstate.walk), so we're already
2578 # other stages (such as dirstate.walk), so we're already
2575 # ignoring things outside of narrowspec in most cases. The
2579 # ignoring things outside of narrowspec in most cases. The
2576 # one case where we might have files outside the narrowspec
2580 # one case where we might have files outside the narrowspec
2577 # at this point is merges, and we already error out in the
2581 # at this point is merges, and we already error out in the
2578 # case where the merge has files outside of the narrowspec,
2582 # case where the merge has files outside of the narrowspec,
2579 # so this is safe.
2583 # so this is safe.
2580 mn = mctx.write(trp, linkrev,
2584 mn = mctx.write(trp, linkrev,
2581 p1.manifestnode(), p2.manifestnode(),
2585 p1.manifestnode(), p2.manifestnode(),
2582 added, drop, match=self.narrowmatch())
2586 added, drop, match=self.narrowmatch())
2583 else:
2587 else:
2584 self.ui.debug('reusing manifest form p1 (listed files '
2588 self.ui.debug('reusing manifest form p1 (listed files '
2585 'actually unchanged)\n')
2589 'actually unchanged)\n')
2586 mn = p1.manifestnode()
2590 mn = p1.manifestnode()
2587 else:
2591 else:
2588 self.ui.debug('reusing manifest from p1 (no file change)\n')
2592 self.ui.debug('reusing manifest from p1 (no file change)\n')
2589 mn = p1.manifestnode()
2593 mn = p1.manifestnode()
2590 files = []
2594 files = []
2591
2595
2592 # update changelog
2596 # update changelog
2593 self.ui.note(_("committing changelog\n"))
2597 self.ui.note(_("committing changelog\n"))
2594 self.changelog.delayupdate(tr)
2598 self.changelog.delayupdate(tr)
2595 n = self.changelog.add(mn, files, ctx.description(),
2599 n = self.changelog.add(mn, files, ctx.description(),
2596 trp, p1.node(), p2.node(),
2600 trp, p1.node(), p2.node(),
2597 user, ctx.date(), ctx.extra().copy())
2601 user, ctx.date(), ctx.extra().copy())
2598 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2602 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2599 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2603 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2600 parent2=xp2)
2604 parent2=xp2)
2601 # set the new commit is proper phase
2605 # set the new commit is proper phase
2602 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2606 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2603 if targetphase:
2607 if targetphase:
2604 # retract boundary do not alter parent changeset.
2608 # retract boundary do not alter parent changeset.
2605 # if a parent have higher the resulting phase will
2609 # if a parent have higher the resulting phase will
2606 # be compliant anyway
2610 # be compliant anyway
2607 #
2611 #
2608 # if minimal phase was 0 we don't need to retract anything
2612 # if minimal phase was 0 we don't need to retract anything
2609 phases.registernew(self, tr, targetphase, [n])
2613 phases.registernew(self, tr, targetphase, [n])
2610 return n
2614 return n
2611
2615
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        # The vars(self) check only flushes a phasecache that has actually
        # been loaded into the instance dict, avoiding loading it here.
        if '_phasecache' in vars(self):
            self._phasecache.write()
2629
2633
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        Flushes the phasecache, refreshes repository caches and finally
        invalidates the repo so stale in-memory state is dropped.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
2661
2665
2662 def status(self, node1='.', node2=None, match=None,
2666 def status(self, node1='.', node2=None, match=None,
2663 ignored=False, clean=False, unknown=False,
2667 ignored=False, clean=False, unknown=False,
2664 listsubrepos=False):
2668 listsubrepos=False):
2665 '''a convenience method that calls node1.status(node2)'''
2669 '''a convenience method that calls node1.status(node2)'''
2666 return self[node1].status(node2, match, ignored, clean, unknown,
2670 return self[node1].status(node2, match, ignored, clean, unknown,
2667 listsubrepos)
2671 listsubrepos)
2668
2672
2669 def addpostdsstatus(self, ps):
2673 def addpostdsstatus(self, ps):
2670 """Add a callback to run within the wlock, at the point at which status
2674 """Add a callback to run within the wlock, at the point at which status
2671 fixups happen.
2675 fixups happen.
2672
2676
2673 On status completion, callback(wctx, status) will be called with the
2677 On status completion, callback(wctx, status) will be called with the
2674 wlock held, unless the dirstate has changed from underneath or the wlock
2678 wlock held, unless the dirstate has changed from underneath or the wlock
2675 couldn't be grabbed.
2679 couldn't be grabbed.
2676
2680
2677 Callbacks should not capture and use a cached copy of the dirstate --
2681 Callbacks should not capture and use a cached copy of the dirstate --
2678 it might change in the meanwhile. Instead, they should access the
2682 it might change in the meanwhile. Instead, they should access the
2679 dirstate via wctx.repo().dirstate.
2683 dirstate via wctx.repo().dirstate.
2680
2684
2681 This list is emptied out after each status run -- extensions should
2685 This list is emptied out after each status run -- extensions should
2682 make sure it adds to this list each time dirstate.status is called.
2686 make sure it adds to this list each time dirstate.status is called.
2683 Extensions should also make sure they don't call this for statuses
2687 Extensions should also make sure they don't call this for statuses
2684 that don't involve the dirstate.
2688 that don't involve the dirstate.
2685 """
2689 """
2686
2690
2687 # The list is located here for uniqueness reasons -- it is actually
2691 # The list is located here for uniqueness reasons -- it is actually
2688 # managed by the workingctx, but that isn't unique per-repo.
2692 # managed by the workingctx, but that isn't unique per-repo.
2689 self._postdsstatus.append(ps)
2693 self._postdsstatus.append(ps)
2690
2694
2691 def postdsstatus(self):
2695 def postdsstatus(self):
2692 """Used by workingctx to get the list of post-dirstate-status hooks."""
2696 """Used by workingctx to get the list of post-dirstate-status hooks."""
2693 return self._postdsstatus
2697 return self._postdsstatus
2694
2698
2695 def clearpostdsstatus(self):
2699 def clearpostdsstatus(self):
2696 """Used by workingctx to clear post-dirstate-status hooks."""
2700 """Used by workingctx to clear post-dirstate-status hooks."""
2697 del self._postdsstatus[:]
2701 del self._postdsstatus[:]
2698
2702
2699 def heads(self, start=None):
2703 def heads(self, start=None):
2700 if start is None:
2704 if start is None:
2701 cl = self.changelog
2705 cl = self.changelog
2702 headrevs = reversed(cl.headrevs())
2706 headrevs = reversed(cl.headrevs())
2703 return [cl.node(rev) for rev in headrevs]
2707 return [cl.node(rev) for rev in headrevs]
2704
2708
2705 heads = self.changelog.heads(start)
2709 heads = self.changelog.heads(start)
2706 # sort the output in rev descending order
2710 # sort the output in rev descending order
2707 return sorted(heads, key=self.changelog.rev, reverse=True)
2711 return sorted(heads, key=self.changelog.rev, reverse=True)
2708
2712
2709 def branchheads(self, branch=None, start=None, closed=False):
2713 def branchheads(self, branch=None, start=None, closed=False):
2710 '''return a (possibly filtered) list of heads for the given branch
2714 '''return a (possibly filtered) list of heads for the given branch
2711
2715
2712 Heads are returned in topological order, from newest to oldest.
2716 Heads are returned in topological order, from newest to oldest.
2713 If branch is None, use the dirstate branch.
2717 If branch is None, use the dirstate branch.
2714 If start is not None, return only heads reachable from start.
2718 If start is not None, return only heads reachable from start.
2715 If closed is True, return heads that are marked as closed as well.
2719 If closed is True, return heads that are marked as closed as well.
2716 '''
2720 '''
2717 if branch is None:
2721 if branch is None:
2718 branch = self[None].branch()
2722 branch = self[None].branch()
2719 branches = self.branchmap()
2723 branches = self.branchmap()
2720 if branch not in branches:
2724 if branch not in branches:
2721 return []
2725 return []
2722 # the cache returns heads ordered lowest to highest
2726 # the cache returns heads ordered lowest to highest
2723 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2727 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2724 if start is not None:
2728 if start is not None:
2725 # filter out the heads that cannot be reached from startrev
2729 # filter out the heads that cannot be reached from startrev
2726 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2730 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2727 bheads = [h for h in bheads if h in fbheads]
2731 bheads = [h for h in bheads if h in fbheads]
2728 return bheads
2732 return bheads
2729
2733
2730 def branches(self, nodes):
2734 def branches(self, nodes):
2731 if not nodes:
2735 if not nodes:
2732 nodes = [self.changelog.tip()]
2736 nodes = [self.changelog.tip()]
2733 b = []
2737 b = []
2734 for n in nodes:
2738 for n in nodes:
2735 t = n
2739 t = n
2736 while True:
2740 while True:
2737 p = self.changelog.parents(n)
2741 p = self.changelog.parents(n)
2738 if p[1] != nullid or p[0] == nullid:
2742 if p[1] != nullid or p[0] == nullid:
2739 b.append((t, n, p[0], p[1]))
2743 b.append((t, n, p[0], p[1]))
2740 break
2744 break
2741 n = p[0]
2745 n = p[0]
2742 return b
2746 return b
2743
2747
2744 def between(self, pairs):
2748 def between(self, pairs):
2745 r = []
2749 r = []
2746
2750
2747 for top, bottom in pairs:
2751 for top, bottom in pairs:
2748 n, l, i = top, [], 0
2752 n, l, i = top, [], 0
2749 f = 1
2753 f = 1
2750
2754
2751 while n != bottom and n != nullid:
2755 while n != bottom and n != nullid:
2752 p = self.changelog.parents(n)[0]
2756 p = self.changelog.parents(n)[0]
2753 if i == f:
2757 if i == f:
2754 l.append(n)
2758 l.append(n)
2755 f = f * 2
2759 f = f * 2
2756 n = p
2760 n = p
2757 i += 1
2761 i += 1
2758
2762
2759 r.append(l)
2763 r.append(l)
2760
2764
2761 return r
2765 return r
2762
2766
2763 def checkpush(self, pushop):
2767 def checkpush(self, pushop):
2764 """Extensions can override this function if additional checks have
2768 """Extensions can override this function if additional checks have
2765 to be performed before pushing, or call it if they override push
2769 to be performed before pushing, or call it if they override push
2766 command.
2770 command.
2767 """
2771 """
2768
2772
2769 @unfilteredpropertycache
2773 @unfilteredpropertycache
2770 def prepushoutgoinghooks(self):
2774 def prepushoutgoinghooks(self):
2771 """Return util.hooks consists of a pushop with repo, remote, outgoing
2775 """Return util.hooks consists of a pushop with repo, remote, outgoing
2772 methods, which are called before pushing changesets.
2776 methods, which are called before pushing changesets.
2773 """
2777 """
2774 return util.hooks()
2778 return util.hooks()
2775
2779
2776 def pushkey(self, namespace, key, old, new):
2780 def pushkey(self, namespace, key, old, new):
2777 try:
2781 try:
2778 tr = self.currenttransaction()
2782 tr = self.currenttransaction()
2779 hookargs = {}
2783 hookargs = {}
2780 if tr is not None:
2784 if tr is not None:
2781 hookargs.update(tr.hookargs)
2785 hookargs.update(tr.hookargs)
2782 hookargs = pycompat.strkwargs(hookargs)
2786 hookargs = pycompat.strkwargs(hookargs)
2783 hookargs[r'namespace'] = namespace
2787 hookargs[r'namespace'] = namespace
2784 hookargs[r'key'] = key
2788 hookargs[r'key'] = key
2785 hookargs[r'old'] = old
2789 hookargs[r'old'] = old
2786 hookargs[r'new'] = new
2790 hookargs[r'new'] = new
2787 self.hook('prepushkey', throw=True, **hookargs)
2791 self.hook('prepushkey', throw=True, **hookargs)
2788 except error.HookAbort as exc:
2792 except error.HookAbort as exc:
2789 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2793 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2790 if exc.hint:
2794 if exc.hint:
2791 self.ui.write_err(_("(%s)\n") % exc.hint)
2795 self.ui.write_err(_("(%s)\n") % exc.hint)
2792 return False
2796 return False
2793 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2797 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2794 ret = pushkey.push(self, namespace, key, old, new)
2798 ret = pushkey.push(self, namespace, key, old, new)
2795 def runhook():
2799 def runhook():
2796 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2800 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2797 ret=ret)
2801 ret=ret)
2798 self._afterlock(runhook)
2802 self._afterlock(runhook)
2799 return ret
2803 return ret
2800
2804
2801 def listkeys(self, namespace):
2805 def listkeys(self, namespace):
2802 self.hook('prelistkeys', throw=True, namespace=namespace)
2806 self.hook('prelistkeys', throw=True, namespace=namespace)
2803 self.ui.debug('listing keys for "%s"\n' % namespace)
2807 self.ui.debug('listing keys for "%s"\n' % namespace)
2804 values = pushkey.list(self, namespace)
2808 values = pushkey.list(self, namespace)
2805 self.hook('listkeys', namespace=namespace, values=values)
2809 self.hook('listkeys', namespace=namespace, values=values)
2806 return values
2810 return values
2807
2811
2808 def debugwireargs(self, one, two, three=None, four=None, five=None):
2812 def debugwireargs(self, one, two, three=None, four=None, five=None):
2809 '''used to test argument passing over the wire'''
2813 '''used to test argument passing over the wire'''
2810 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2814 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2811 pycompat.bytestr(four),
2815 pycompat.bytestr(four),
2812 pycompat.bytestr(five))
2816 pycompat.bytestr(five))
2813
2817
2814 def savecommitmessage(self, text):
2818 def savecommitmessage(self, text):
2815 fp = self.vfs('last-message.txt', 'wb')
2819 fp = self.vfs('last-message.txt', 'wb')
2816 try:
2820 try:
2817 fp.write(text)
2821 fp.write(text)
2818 finally:
2822 finally:
2819 fp.close()
2823 fp.close()
2820 return self.pathto(fp.name[len(self.root) + 1:])
2824 return self.pathto(fp.name[len(self.root) + 1:])
2821
2825
2822 # used to avoid circular references so destructors work
2826 # used to avoid circular references so destructors work
2823 def aftertrans(files):
2827 def aftertrans(files):
2824 renamefiles = [tuple(t) for t in files]
2828 renamefiles = [tuple(t) for t in files]
2825 def a():
2829 def a():
2826 for vfs, src, dest in renamefiles:
2830 for vfs, src, dest in renamefiles:
2827 # if src and dest refer to a same file, vfs.rename is a no-op,
2831 # if src and dest refer to a same file, vfs.rename is a no-op,
2828 # leaving both src and dest on disk. delete dest to make sure
2832 # leaving both src and dest on disk. delete dest to make sure
2829 # the rename couldn't be such a no-op.
2833 # the rename couldn't be such a no-op.
2830 vfs.tryunlink(dest)
2834 vfs.tryunlink(dest)
2831 try:
2835 try:
2832 vfs.rename(src, dest)
2836 vfs.rename(src, dest)
2833 except OSError: # journal file does not yet exist
2837 except OSError: # journal file does not yet exist
2834 pass
2838 pass
2835 return a
2839 return a
2836
2840
2837 def undoname(fn):
2841 def undoname(fn):
2838 base, name = os.path.split(fn)
2842 base, name = os.path.split(fn)
2839 assert name.startswith('journal')
2843 assert name.startswith('journal')
2840 return os.path.join(base, name.replace('journal', 'undo', 1))
2844 return os.path.join(base, name.replace('journal', 'undo', 1))
2841
2845
2842 def instance(ui, path, create, intents=None, createopts=None):
2846 def instance(ui, path, create, intents=None, createopts=None):
2843 localpath = util.urllocalpath(path)
2847 localpath = util.urllocalpath(path)
2844 if create:
2848 if create:
2845 createrepository(ui, localpath, createopts=createopts)
2849 createrepository(ui, localpath, createopts=createopts)
2846
2850
2847 return makelocalrepository(ui, localpath, intents=intents)
2851 return makelocalrepository(ui, localpath, intents=intents)
2848
2852
2849 def islocal(path):
2853 def islocal(path):
2850 return True
2854 return True
2851
2855
2852 def defaultcreateopts(ui, createopts=None):
2856 def defaultcreateopts(ui, createopts=None):
2853 """Populate the default creation options for a repository.
2857 """Populate the default creation options for a repository.
2854
2858
2855 A dictionary of explicitly requested creation options can be passed
2859 A dictionary of explicitly requested creation options can be passed
2856 in. Missing keys will be populated.
2860 in. Missing keys will be populated.
2857 """
2861 """
2858 createopts = dict(createopts or {})
2862 createopts = dict(createopts or {})
2859
2863
2860 if 'backend' not in createopts:
2864 if 'backend' not in createopts:
2861 # experimental config: storage.new-repo-backend
2865 # experimental config: storage.new-repo-backend
2862 createopts['backend'] = ui.config('storage', 'new-repo-backend')
2866 createopts['backend'] = ui.config('storage', 'new-repo-backend')
2863
2867
2864 return createopts
2868 return createopts
2865
2869
2866 def newreporequirements(ui, createopts):
2870 def newreporequirements(ui, createopts):
2867 """Determine the set of requirements for a new local repository.
2871 """Determine the set of requirements for a new local repository.
2868
2872
2869 Extensions can wrap this function to specify custom requirements for
2873 Extensions can wrap this function to specify custom requirements for
2870 new repositories.
2874 new repositories.
2871 """
2875 """
2872 # If the repo is being created from a shared repository, we copy
2876 # If the repo is being created from a shared repository, we copy
2873 # its requirements.
2877 # its requirements.
2874 if 'sharedrepo' in createopts:
2878 if 'sharedrepo' in createopts:
2875 requirements = set(createopts['sharedrepo'].requirements)
2879 requirements = set(createopts['sharedrepo'].requirements)
2876 if createopts.get('sharedrelative'):
2880 if createopts.get('sharedrelative'):
2877 requirements.add('relshared')
2881 requirements.add('relshared')
2878 else:
2882 else:
2879 requirements.add('shared')
2883 requirements.add('shared')
2880
2884
2881 return requirements
2885 return requirements
2882
2886
2883 if 'backend' not in createopts:
2887 if 'backend' not in createopts:
2884 raise error.ProgrammingError('backend key not present in createopts; '
2888 raise error.ProgrammingError('backend key not present in createopts; '
2885 'was defaultcreateopts() called?')
2889 'was defaultcreateopts() called?')
2886
2890
2887 if createopts['backend'] != 'revlogv1':
2891 if createopts['backend'] != 'revlogv1':
2888 raise error.Abort(_('unable to determine repository requirements for '
2892 raise error.Abort(_('unable to determine repository requirements for '
2889 'storage backend: %s') % createopts['backend'])
2893 'storage backend: %s') % createopts['backend'])
2890
2894
2891 requirements = {'revlogv1'}
2895 requirements = {'revlogv1'}
2892 if ui.configbool('format', 'usestore'):
2896 if ui.configbool('format', 'usestore'):
2893 requirements.add('store')
2897 requirements.add('store')
2894 if ui.configbool('format', 'usefncache'):
2898 if ui.configbool('format', 'usefncache'):
2895 requirements.add('fncache')
2899 requirements.add('fncache')
2896 if ui.configbool('format', 'dotencode'):
2900 if ui.configbool('format', 'dotencode'):
2897 requirements.add('dotencode')
2901 requirements.add('dotencode')
2898
2902
2899 compengine = ui.config('experimental', 'format.compression')
2903 compengine = ui.config('experimental', 'format.compression')
2900 if compengine not in util.compengines:
2904 if compengine not in util.compengines:
2901 raise error.Abort(_('compression engine %s defined by '
2905 raise error.Abort(_('compression engine %s defined by '
2902 'experimental.format.compression not available') %
2906 'experimental.format.compression not available') %
2903 compengine,
2907 compengine,
2904 hint=_('run "hg debuginstall" to list available '
2908 hint=_('run "hg debuginstall" to list available '
2905 'compression engines'))
2909 'compression engines'))
2906
2910
2907 # zlib is the historical default and doesn't need an explicit requirement.
2911 # zlib is the historical default and doesn't need an explicit requirement.
2908 if compengine != 'zlib':
2912 if compengine != 'zlib':
2909 requirements.add('exp-compression-%s' % compengine)
2913 requirements.add('exp-compression-%s' % compengine)
2910
2914
2911 if scmutil.gdinitconfig(ui):
2915 if scmutil.gdinitconfig(ui):
2912 requirements.add('generaldelta')
2916 requirements.add('generaldelta')
2913 if ui.configbool('format', 'sparse-revlog'):
2917 if ui.configbool('format', 'sparse-revlog'):
2914 requirements.add(SPARSEREVLOG_REQUIREMENT)
2918 requirements.add(SPARSEREVLOG_REQUIREMENT)
2915 if ui.configbool('experimental', 'treemanifest'):
2919 if ui.configbool('experimental', 'treemanifest'):
2916 requirements.add('treemanifest')
2920 requirements.add('treemanifest')
2917
2921
2918 revlogv2 = ui.config('experimental', 'revlogv2')
2922 revlogv2 = ui.config('experimental', 'revlogv2')
2919 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2923 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2920 requirements.remove('revlogv1')
2924 requirements.remove('revlogv1')
2921 # generaldelta is implied by revlogv2.
2925 # generaldelta is implied by revlogv2.
2922 requirements.discard('generaldelta')
2926 requirements.discard('generaldelta')
2923 requirements.add(REVLOGV2_REQUIREMENT)
2927 requirements.add(REVLOGV2_REQUIREMENT)
2924 # experimental config: format.internal-phase
2928 # experimental config: format.internal-phase
2925 if ui.configbool('format', 'internal-phase'):
2929 if ui.configbool('format', 'internal-phase'):
2926 requirements.add('internal-phase')
2930 requirements.add('internal-phase')
2927
2931
2928 if createopts.get('narrowfiles'):
2932 if createopts.get('narrowfiles'):
2929 requirements.add(repository.NARROW_REQUIREMENT)
2933 requirements.add(repository.NARROW_REQUIREMENT)
2930
2934
2931 if createopts.get('lfs'):
2935 if createopts.get('lfs'):
2932 requirements.add('lfs')
2936 requirements.add('lfs')
2933
2937
2934 return requirements
2938 return requirements
2935
2939
2936 def filterknowncreateopts(ui, createopts):
2940 def filterknowncreateopts(ui, createopts):
2937 """Filters a dict of repo creation options against options that are known.
2941 """Filters a dict of repo creation options against options that are known.
2938
2942
2939 Receives a dict of repo creation options and returns a dict of those
2943 Receives a dict of repo creation options and returns a dict of those
2940 options that we don't know how to handle.
2944 options that we don't know how to handle.
2941
2945
2942 This function is called as part of repository creation. If the
2946 This function is called as part of repository creation. If the
2943 returned dict contains any items, repository creation will not
2947 returned dict contains any items, repository creation will not
2944 be allowed, as it means there was a request to create a repository
2948 be allowed, as it means there was a request to create a repository
2945 with options not recognized by loaded code.
2949 with options not recognized by loaded code.
2946
2950
2947 Extensions can wrap this function to filter out creation options
2951 Extensions can wrap this function to filter out creation options
2948 they know how to handle.
2952 they know how to handle.
2949 """
2953 """
2950 known = {
2954 known = {
2951 'backend',
2955 'backend',
2952 'lfs',
2956 'lfs',
2953 'narrowfiles',
2957 'narrowfiles',
2954 'sharedrepo',
2958 'sharedrepo',
2955 'sharedrelative',
2959 'sharedrelative',
2956 'shareditems',
2960 'shareditems',
2957 'shallowfilestore',
2961 'shallowfilestore',
2958 }
2962 }
2959
2963
2960 return {k: v for k, v in createopts.items() if k not in known}
2964 return {k: v for k, v in createopts.items() if k not in known}
2961
2965
2962 def createrepository(ui, path, createopts=None):
2966 def createrepository(ui, path, createopts=None):
2963 """Create a new repository in a vfs.
2967 """Create a new repository in a vfs.
2964
2968
2965 ``path`` path to the new repo's working directory.
2969 ``path`` path to the new repo's working directory.
2966 ``createopts`` options for the new repository.
2970 ``createopts`` options for the new repository.
2967
2971
2968 The following keys for ``createopts`` are recognized:
2972 The following keys for ``createopts`` are recognized:
2969
2973
2970 backend
2974 backend
2971 The storage backend to use.
2975 The storage backend to use.
2972 lfs
2976 lfs
2973 Repository will be created with ``lfs`` requirement. The lfs extension
2977 Repository will be created with ``lfs`` requirement. The lfs extension
2974 will automatically be loaded when the repository is accessed.
2978 will automatically be loaded when the repository is accessed.
2975 narrowfiles
2979 narrowfiles
2976 Set up repository to support narrow file storage.
2980 Set up repository to support narrow file storage.
2977 sharedrepo
2981 sharedrepo
2978 Repository object from which storage should be shared.
2982 Repository object from which storage should be shared.
2979 sharedrelative
2983 sharedrelative
2980 Boolean indicating if the path to the shared repo should be
2984 Boolean indicating if the path to the shared repo should be
2981 stored as relative. By default, the pointer to the "parent" repo
2985 stored as relative. By default, the pointer to the "parent" repo
2982 is stored as an absolute path.
2986 is stored as an absolute path.
2983 shareditems
2987 shareditems
2984 Set of items to share to the new repository (in addition to storage).
2988 Set of items to share to the new repository (in addition to storage).
2985 shallowfilestore
2989 shallowfilestore
2986 Indicates that storage for files should be shallow (not all ancestor
2990 Indicates that storage for files should be shallow (not all ancestor
2987 revisions are known).
2991 revisions are known).
2988 """
2992 """
2989 createopts = defaultcreateopts(ui, createopts=createopts)
2993 createopts = defaultcreateopts(ui, createopts=createopts)
2990
2994
2991 unknownopts = filterknowncreateopts(ui, createopts)
2995 unknownopts = filterknowncreateopts(ui, createopts)
2992
2996
2993 if not isinstance(unknownopts, dict):
2997 if not isinstance(unknownopts, dict):
2994 raise error.ProgrammingError('filterknowncreateopts() did not return '
2998 raise error.ProgrammingError('filterknowncreateopts() did not return '
2995 'a dict')
2999 'a dict')
2996
3000
2997 if unknownopts:
3001 if unknownopts:
2998 raise error.Abort(_('unable to create repository because of unknown '
3002 raise error.Abort(_('unable to create repository because of unknown '
2999 'creation option: %s') %
3003 'creation option: %s') %
3000 ', '.join(sorted(unknownopts)),
3004 ', '.join(sorted(unknownopts)),
3001 hint=_('is a required extension not loaded?'))
3005 hint=_('is a required extension not loaded?'))
3002
3006
3003 requirements = newreporequirements(ui, createopts=createopts)
3007 requirements = newreporequirements(ui, createopts=createopts)
3004
3008
3005 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3009 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3006
3010
3007 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3011 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3008 if hgvfs.exists():
3012 if hgvfs.exists():
3009 raise error.RepoError(_('repository %s already exists') % path)
3013 raise error.RepoError(_('repository %s already exists') % path)
3010
3014
3011 if 'sharedrepo' in createopts:
3015 if 'sharedrepo' in createopts:
3012 sharedpath = createopts['sharedrepo'].sharedpath
3016 sharedpath = createopts['sharedrepo'].sharedpath
3013
3017
3014 if createopts.get('sharedrelative'):
3018 if createopts.get('sharedrelative'):
3015 try:
3019 try:
3016 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3020 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3017 except (IOError, ValueError) as e:
3021 except (IOError, ValueError) as e:
3018 # ValueError is raised on Windows if the drive letters differ
3022 # ValueError is raised on Windows if the drive letters differ
3019 # on each path.
3023 # on each path.
3020 raise error.Abort(_('cannot calculate relative path'),
3024 raise error.Abort(_('cannot calculate relative path'),
3021 hint=stringutil.forcebytestr(e))
3025 hint=stringutil.forcebytestr(e))
3022
3026
3023 if not wdirvfs.exists():
3027 if not wdirvfs.exists():
3024 wdirvfs.makedirs()
3028 wdirvfs.makedirs()
3025
3029
3026 hgvfs.makedir(notindexed=True)
3030 hgvfs.makedir(notindexed=True)
3027 if 'sharedrepo' not in createopts:
3031 if 'sharedrepo' not in createopts:
3028 hgvfs.mkdir(b'cache')
3032 hgvfs.mkdir(b'cache')
3029 hgvfs.mkdir(b'wcache')
3033 hgvfs.mkdir(b'wcache')
3030
3034
3031 if b'store' in requirements and 'sharedrepo' not in createopts:
3035 if b'store' in requirements and 'sharedrepo' not in createopts:
3032 hgvfs.mkdir(b'store')
3036 hgvfs.mkdir(b'store')
3033
3037
3034 # We create an invalid changelog outside the store so very old
3038 # We create an invalid changelog outside the store so very old
3035 # Mercurial versions (which didn't know about the requirements
3039 # Mercurial versions (which didn't know about the requirements
3036 # file) encounter an error on reading the changelog. This
3040 # file) encounter an error on reading the changelog. This
3037 # effectively locks out old clients and prevents them from
3041 # effectively locks out old clients and prevents them from
3038 # mucking with a repo in an unknown format.
3042 # mucking with a repo in an unknown format.
3039 #
3043 #
3040 # The revlog header has version 2, which won't be recognized by
3044 # The revlog header has version 2, which won't be recognized by
3041 # such old clients.
3045 # such old clients.
3042 hgvfs.append(b'00changelog.i',
3046 hgvfs.append(b'00changelog.i',
3043 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3047 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3044 b'layout')
3048 b'layout')
3045
3049
3046 scmutil.writerequires(hgvfs, requirements)
3050 scmutil.writerequires(hgvfs, requirements)
3047
3051
3048 # Write out file telling readers where to find the shared store.
3052 # Write out file telling readers where to find the shared store.
3049 if 'sharedrepo' in createopts:
3053 if 'sharedrepo' in createopts:
3050 hgvfs.write(b'sharedpath', sharedpath)
3054 hgvfs.write(b'sharedpath', sharedpath)
3051
3055
3052 if createopts.get('shareditems'):
3056 if createopts.get('shareditems'):
3053 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
3057 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
3054 hgvfs.write(b'shared', shared)
3058 hgvfs.write(b'shared', shared)
3055
3059
3056 def poisonrepository(repo):
3060 def poisonrepository(repo):
3057 """Poison a repository instance so it can no longer be used."""
3061 """Poison a repository instance so it can no longer be used."""
3058 # Perform any cleanup on the instance.
3062 # Perform any cleanup on the instance.
3059 repo.close()
3063 repo.close()
3060
3064
3061 # Our strategy is to replace the type of the object with one that
3065 # Our strategy is to replace the type of the object with one that
3062 # has all attribute lookups result in error.
3066 # has all attribute lookups result in error.
3063 #
3067 #
3064 # But we have to allow the close() method because some constructors
3068 # But we have to allow the close() method because some constructors
3065 # of repos call close() on repo references.
3069 # of repos call close() on repo references.
3066 class poisonedrepository(object):
3070 class poisonedrepository(object):
3067 def __getattribute__(self, item):
3071 def __getattribute__(self, item):
3068 if item == r'close':
3072 if item == r'close':
3069 return object.__getattribute__(self, item)
3073 return object.__getattribute__(self, item)
3070
3074
3071 raise error.ProgrammingError('repo instances should not be used '
3075 raise error.ProgrammingError('repo instances should not be used '
3072 'after unshare')
3076 'after unshare')
3073
3077
3074 def close(self):
3078 def close(self):
3075 pass
3079 pass
3076
3080
3077 # We may have a repoview, which intercepts __setattr__. So be sure
3081 # We may have a repoview, which intercepts __setattr__. So be sure
3078 # we operate at the lowest level possible.
3082 # we operate at the lowest level possible.
3079 object.__setattr__(repo, r'__class__', poisonedrepository)
3083 object.__setattr__(repo, r'__class__', poisonedrepository)
@@ -1,2654 +1,2657 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import collections
16 import collections
17 import contextlib
17 import contextlib
18 import errno
18 import errno
19 import os
19 import os
20 import struct
20 import struct
21 import zlib
21 import zlib
22
22
23 # import stuff from node for others to import from revlog
23 # import stuff from node for others to import from revlog
24 from .node import (
24 from .node import (
25 bin,
25 bin,
26 hex,
26 hex,
27 nullhex,
27 nullhex,
28 nullid,
28 nullid,
29 nullrev,
29 nullrev,
30 short,
30 short,
31 wdirfilenodeids,
31 wdirfilenodeids,
32 wdirhex,
32 wdirhex,
33 wdirid,
33 wdirid,
34 wdirrev,
34 wdirrev,
35 )
35 )
36 from .i18n import _
36 from .i18n import _
37 from .revlogutils.constants import (
37 from .revlogutils.constants import (
38 FLAG_GENERALDELTA,
38 FLAG_GENERALDELTA,
39 FLAG_INLINE_DATA,
39 FLAG_INLINE_DATA,
40 REVIDX_DEFAULT_FLAGS,
40 REVIDX_DEFAULT_FLAGS,
41 REVIDX_ELLIPSIS,
41 REVIDX_ELLIPSIS,
42 REVIDX_EXTSTORED,
42 REVIDX_EXTSTORED,
43 REVIDX_FLAGS_ORDER,
43 REVIDX_FLAGS_ORDER,
44 REVIDX_ISCENSORED,
44 REVIDX_ISCENSORED,
45 REVIDX_KNOWN_FLAGS,
45 REVIDX_KNOWN_FLAGS,
46 REVIDX_RAWTEXT_CHANGING_FLAGS,
46 REVIDX_RAWTEXT_CHANGING_FLAGS,
47 REVLOGV0,
47 REVLOGV0,
48 REVLOGV1,
48 REVLOGV1,
49 REVLOGV1_FLAGS,
49 REVLOGV1_FLAGS,
50 REVLOGV2,
50 REVLOGV2,
51 REVLOGV2_FLAGS,
51 REVLOGV2_FLAGS,
52 REVLOG_DEFAULT_FLAGS,
52 REVLOG_DEFAULT_FLAGS,
53 REVLOG_DEFAULT_FORMAT,
53 REVLOG_DEFAULT_FORMAT,
54 REVLOG_DEFAULT_VERSION,
54 REVLOG_DEFAULT_VERSION,
55 )
55 )
56 from .thirdparty import (
56 from .thirdparty import (
57 attr,
57 attr,
58 )
58 )
59 from . import (
59 from . import (
60 ancestor,
60 ancestor,
61 dagop,
61 dagop,
62 error,
62 error,
63 mdiff,
63 mdiff,
64 policy,
64 policy,
65 pycompat,
65 pycompat,
66 repository,
66 repository,
67 templatefilters,
67 templatefilters,
68 util,
68 util,
69 )
69 )
70 from .revlogutils import (
70 from .revlogutils import (
71 deltas as deltautil,
71 deltas as deltautil,
72 )
72 )
73 from .utils import (
73 from .utils import (
74 interfaceutil,
74 interfaceutil,
75 storageutil,
75 storageutil,
76 stringutil,
76 stringutil,
77 )
77 )
78
78
79 # blanked usage of all the name to prevent pyflakes constraints
79 # blanked usage of all the name to prevent pyflakes constraints
80 # We need these name available in the module for extensions.
80 # We need these name available in the module for extensions.
81 REVLOGV0
81 REVLOGV0
82 REVLOGV1
82 REVLOGV1
83 REVLOGV2
83 REVLOGV2
84 FLAG_INLINE_DATA
84 FLAG_INLINE_DATA
85 FLAG_GENERALDELTA
85 FLAG_GENERALDELTA
86 REVLOG_DEFAULT_FLAGS
86 REVLOG_DEFAULT_FLAGS
87 REVLOG_DEFAULT_FORMAT
87 REVLOG_DEFAULT_FORMAT
88 REVLOG_DEFAULT_VERSION
88 REVLOG_DEFAULT_VERSION
89 REVLOGV1_FLAGS
89 REVLOGV1_FLAGS
90 REVLOGV2_FLAGS
90 REVLOGV2_FLAGS
91 REVIDX_ISCENSORED
91 REVIDX_ISCENSORED
92 REVIDX_ELLIPSIS
92 REVIDX_ELLIPSIS
93 REVIDX_EXTSTORED
93 REVIDX_EXTSTORED
94 REVIDX_DEFAULT_FLAGS
94 REVIDX_DEFAULT_FLAGS
95 REVIDX_FLAGS_ORDER
95 REVIDX_FLAGS_ORDER
96 REVIDX_KNOWN_FLAGS
96 REVIDX_KNOWN_FLAGS
97 REVIDX_RAWTEXT_CHANGING_FLAGS
97 REVIDX_RAWTEXT_CHANGING_FLAGS
98
98
99 parsers = policy.importmod(r'parsers')
99 parsers = policy.importmod(r'parsers')
100 try:
100 try:
101 from . import rustext
101 from . import rustext
102 rustext.__name__ # force actual import (see hgdemandimport)
102 rustext.__name__ # force actual import (see hgdemandimport)
103 except ImportError:
103 except ImportError:
104 rustext = None
104 rustext = None
105
105
106 # Aliased for performance.
106 # Aliased for performance.
107 _zlibdecompress = zlib.decompress
107 _zlibdecompress = zlib.decompress
108
108
109 # max size of revlog with inline data
109 # max size of revlog with inline data
110 _maxinline = 131072
110 _maxinline = 131072
111 _chunksize = 1048576
111 _chunksize = 1048576
112
112
113 # Store flag processors (cf. 'addflagprocessor()' to register)
113 # Store flag processors (cf. 'addflagprocessor()' to register)
114 _flagprocessors = {
114 _flagprocessors = {
115 REVIDX_ISCENSORED: None,
115 REVIDX_ISCENSORED: None,
116 }
116 }
117
117
118 # Flag processors for REVIDX_ELLIPSIS.
118 # Flag processors for REVIDX_ELLIPSIS.
119 def ellipsisreadprocessor(rl, text):
119 def ellipsisreadprocessor(rl, text):
120 return text, False
120 return text, False
121
121
122 def ellipsiswriteprocessor(rl, text):
122 def ellipsiswriteprocessor(rl, text):
123 return text, False
123 return text, False
124
124
125 def ellipsisrawprocessor(rl, text):
125 def ellipsisrawprocessor(rl, text):
126 return False
126 return False
127
127
128 ellipsisprocessor = (
128 ellipsisprocessor = (
129 ellipsisreadprocessor,
129 ellipsisreadprocessor,
130 ellipsiswriteprocessor,
130 ellipsiswriteprocessor,
131 ellipsisrawprocessor,
131 ellipsisrawprocessor,
132 )
132 )
133
133
134 def addflagprocessor(flag, processor):
134 def addflagprocessor(flag, processor):
135 """Register a flag processor on a revision data flag.
135 """Register a flag processor on a revision data flag.
136
136
137 Invariant:
137 Invariant:
138 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
138 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
139 and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
139 and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
140 - Only one flag processor can be registered on a specific flag.
140 - Only one flag processor can be registered on a specific flag.
141 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
141 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
142 following signatures:
142 following signatures:
143 - (read) f(self, rawtext) -> text, bool
143 - (read) f(self, rawtext) -> text, bool
144 - (write) f(self, text) -> rawtext, bool
144 - (write) f(self, text) -> rawtext, bool
145 - (raw) f(self, rawtext) -> bool
145 - (raw) f(self, rawtext) -> bool
146 "text" is presented to the user. "rawtext" is stored in revlog data, not
146 "text" is presented to the user. "rawtext" is stored in revlog data, not
147 directly visible to the user.
147 directly visible to the user.
148 The boolean returned by these transforms is used to determine whether
148 The boolean returned by these transforms is used to determine whether
149 the returned text can be used for hash integrity checking. For example,
149 the returned text can be used for hash integrity checking. For example,
150 if "write" returns False, then "text" is used to generate hash. If
150 if "write" returns False, then "text" is used to generate hash. If
151 "write" returns True, that basically means "rawtext" returned by "write"
151 "write" returns True, that basically means "rawtext" returned by "write"
152 should be used to generate hash. Usually, "write" and "read" return
152 should be used to generate hash. Usually, "write" and "read" return
153 different booleans. And "raw" returns a same boolean as "write".
153 different booleans. And "raw" returns a same boolean as "write".
154
154
155 Note: The 'raw' transform is used for changegroup generation and in some
155 Note: The 'raw' transform is used for changegroup generation and in some
156 debug commands. In this case the transform only indicates whether the
156 debug commands. In this case the transform only indicates whether the
157 contents can be used for hash integrity checks.
157 contents can be used for hash integrity checks.
158 """
158 """
159 _insertflagprocessor(flag, processor, _flagprocessors)
159 _insertflagprocessor(flag, processor, _flagprocessors)
160
160
161 def _insertflagprocessor(flag, processor, flagprocessors):
161 def _insertflagprocessor(flag, processor, flagprocessors):
162 if not flag & REVIDX_KNOWN_FLAGS:
162 if not flag & REVIDX_KNOWN_FLAGS:
163 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
163 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
164 raise error.ProgrammingError(msg)
164 raise error.ProgrammingError(msg)
165 if flag not in REVIDX_FLAGS_ORDER:
165 if flag not in REVIDX_FLAGS_ORDER:
166 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
166 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
167 raise error.ProgrammingError(msg)
167 raise error.ProgrammingError(msg)
168 if flag in flagprocessors:
168 if flag in flagprocessors:
169 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
169 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
170 raise error.Abort(msg)
170 raise error.Abort(msg)
171 flagprocessors[flag] = processor
171 flagprocessors[flag] = processor
172
172
173 def getoffset(q):
173 def getoffset(q):
174 return int(q >> 16)
174 return int(q >> 16)
175
175
176 def gettype(q):
176 def gettype(q):
177 return int(q & 0xFFFF)
177 return int(q & 0xFFFF)
178
178
179 def offset_type(offset, type):
179 def offset_type(offset, type):
180 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
180 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
181 raise ValueError('unknown revlog index flags')
181 raise ValueError('unknown revlog index flags')
182 return int(int(offset) << 16 | type)
182 return int(int(offset) << 16 | type)
183
183
184 @attr.s(slots=True, frozen=True)
184 @attr.s(slots=True, frozen=True)
185 class _revisioninfo(object):
185 class _revisioninfo(object):
186 """Information about a revision that allows building its fulltext
186 """Information about a revision that allows building its fulltext
187 node: expected hash of the revision
187 node: expected hash of the revision
188 p1, p2: parent revs of the revision
188 p1, p2: parent revs of the revision
189 btext: built text cache consisting of a one-element list
189 btext: built text cache consisting of a one-element list
190 cachedelta: (baserev, uncompressed_delta) or None
190 cachedelta: (baserev, uncompressed_delta) or None
191 flags: flags associated to the revision storage
191 flags: flags associated to the revision storage
192
192
193 One of btext[0] or cachedelta must be set.
193 One of btext[0] or cachedelta must be set.
194 """
194 """
195 node = attr.ib()
195 node = attr.ib()
196 p1 = attr.ib()
196 p1 = attr.ib()
197 p2 = attr.ib()
197 p2 = attr.ib()
198 btext = attr.ib()
198 btext = attr.ib()
199 textlen = attr.ib()
199 textlen = attr.ib()
200 cachedelta = attr.ib()
200 cachedelta = attr.ib()
201 flags = attr.ib()
201 flags = attr.ib()
202
202
203 @interfaceutil.implementer(repository.irevisiondelta)
203 @interfaceutil.implementer(repository.irevisiondelta)
204 @attr.s(slots=True)
204 @attr.s(slots=True)
205 class revlogrevisiondelta(object):
205 class revlogrevisiondelta(object):
206 node = attr.ib()
206 node = attr.ib()
207 p1node = attr.ib()
207 p1node = attr.ib()
208 p2node = attr.ib()
208 p2node = attr.ib()
209 basenode = attr.ib()
209 basenode = attr.ib()
210 flags = attr.ib()
210 flags = attr.ib()
211 baserevisionsize = attr.ib()
211 baserevisionsize = attr.ib()
212 revision = attr.ib()
212 revision = attr.ib()
213 delta = attr.ib()
213 delta = attr.ib()
214 linknode = attr.ib(default=None)
214 linknode = attr.ib(default=None)
215
215
216 @interfaceutil.implementer(repository.iverifyproblem)
216 @interfaceutil.implementer(repository.iverifyproblem)
217 @attr.s(frozen=True)
217 @attr.s(frozen=True)
218 class revlogproblem(object):
218 class revlogproblem(object):
219 warning = attr.ib(default=None)
219 warning = attr.ib(default=None)
220 error = attr.ib(default=None)
220 error = attr.ib(default=None)
221 node = attr.ib(default=None)
221 node = attr.ib(default=None)
222
222
223 # index v0:
223 # index v0:
224 # 4 bytes: offset
224 # 4 bytes: offset
225 # 4 bytes: compressed length
225 # 4 bytes: compressed length
226 # 4 bytes: base rev
226 # 4 bytes: base rev
227 # 4 bytes: link rev
227 # 4 bytes: link rev
228 # 20 bytes: parent 1 nodeid
228 # 20 bytes: parent 1 nodeid
229 # 20 bytes: parent 2 nodeid
229 # 20 bytes: parent 2 nodeid
230 # 20 bytes: nodeid
230 # 20 bytes: nodeid
231 indexformatv0 = struct.Struct(">4l20s20s20s")
231 indexformatv0 = struct.Struct(">4l20s20s20s")
232 indexformatv0_pack = indexformatv0.pack
232 indexformatv0_pack = indexformatv0.pack
233 indexformatv0_unpack = indexformatv0.unpack
233 indexformatv0_unpack = indexformatv0.unpack
234
234
235 class revlogoldindex(list):
235 class revlogoldindex(list):
236 def __getitem__(self, i):
236 def __getitem__(self, i):
237 if i == -1:
237 if i == -1:
238 return (0, 0, 0, -1, -1, -1, -1, nullid)
238 return (0, 0, 0, -1, -1, -1, -1, nullid)
239 return list.__getitem__(self, i)
239 return list.__getitem__(self, i)
240
240
241 class revlogoldio(object):
241 class revlogoldio(object):
242 def __init__(self):
242 def __init__(self):
243 self.size = indexformatv0.size
243 self.size = indexformatv0.size
244
244
245 def parseindex(self, data, inline):
245 def parseindex(self, data, inline):
246 s = self.size
246 s = self.size
247 index = []
247 index = []
248 nodemap = {nullid: nullrev}
248 nodemap = {nullid: nullrev}
249 n = off = 0
249 n = off = 0
250 l = len(data)
250 l = len(data)
251 while off + s <= l:
251 while off + s <= l:
252 cur = data[off:off + s]
252 cur = data[off:off + s]
253 off += s
253 off += s
254 e = indexformatv0_unpack(cur)
254 e = indexformatv0_unpack(cur)
255 # transform to revlogv1 format
255 # transform to revlogv1 format
256 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
256 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
257 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
257 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
258 index.append(e2)
258 index.append(e2)
259 nodemap[e[6]] = n
259 nodemap[e[6]] = n
260 n += 1
260 n += 1
261
261
262 return revlogoldindex(index), nodemap, None
262 return revlogoldindex(index), nodemap, None
263
263
264 def packentry(self, entry, node, version, rev):
264 def packentry(self, entry, node, version, rev):
265 if gettype(entry[0]):
265 if gettype(entry[0]):
266 raise error.RevlogError(_('index entry flags need revlog '
266 raise error.RevlogError(_('index entry flags need revlog '
267 'version 1'))
267 'version 1'))
268 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
268 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
269 node(entry[5]), node(entry[6]), entry[7])
269 node(entry[5]), node(entry[6]), entry[7])
270 return indexformatv0_pack(*e2)
270 return indexformatv0_pack(*e2)
271
271
272 # index ng:
272 # index ng:
273 # 6 bytes: offset
273 # 6 bytes: offset
274 # 2 bytes: flags
274 # 2 bytes: flags
275 # 4 bytes: compressed length
275 # 4 bytes: compressed length
276 # 4 bytes: uncompressed length
276 # 4 bytes: uncompressed length
277 # 4 bytes: base rev
277 # 4 bytes: base rev
278 # 4 bytes: link rev
278 # 4 bytes: link rev
279 # 4 bytes: parent 1 rev
279 # 4 bytes: parent 1 rev
280 # 4 bytes: parent 2 rev
280 # 4 bytes: parent 2 rev
281 # 32 bytes: nodeid
281 # 32 bytes: nodeid
282 indexformatng = struct.Struct(">Qiiiiii20s12x")
282 indexformatng = struct.Struct(">Qiiiiii20s12x")
283 indexformatng_pack = indexformatng.pack
283 indexformatng_pack = indexformatng.pack
284 versionformat = struct.Struct(">I")
284 versionformat = struct.Struct(">I")
285 versionformat_pack = versionformat.pack
285 versionformat_pack = versionformat.pack
286 versionformat_unpack = versionformat.unpack
286 versionformat_unpack = versionformat.unpack
287
287
288 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
288 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
289 # signed integer)
289 # signed integer)
290 _maxentrysize = 0x7fffffff
290 _maxentrysize = 0x7fffffff
291
291
292 class revlogio(object):
292 class revlogio(object):
293 def __init__(self):
293 def __init__(self):
294 self.size = indexformatng.size
294 self.size = indexformatng.size
295
295
296 def parseindex(self, data, inline):
296 def parseindex(self, data, inline):
297 # call the C implementation to parse the index data
297 # call the C implementation to parse the index data
298 index, cache = parsers.parse_index2(data, inline)
298 index, cache = parsers.parse_index2(data, inline)
299 return index, getattr(index, 'nodemap', None), cache
299 return index, getattr(index, 'nodemap', None), cache
300
300
301 def packentry(self, entry, node, version, rev):
301 def packentry(self, entry, node, version, rev):
302 p = indexformatng_pack(*entry)
302 p = indexformatng_pack(*entry)
303 if rev == 0:
303 if rev == 0:
304 p = versionformat_pack(version) + p[4:]
304 p = versionformat_pack(version) + p[4:]
305 return p
305 return p
306
306
307 class revlog(object):
307 class revlog(object):
308 """
308 """
309 the underlying revision storage object
309 the underlying revision storage object
310
310
311 A revlog consists of two parts, an index and the revision data.
311 A revlog consists of two parts, an index and the revision data.
312
312
313 The index is a file with a fixed record size containing
313 The index is a file with a fixed record size containing
314 information on each revision, including its nodeid (hash), the
314 information on each revision, including its nodeid (hash), the
315 nodeids of its parents, the position and offset of its data within
315 nodeids of its parents, the position and offset of its data within
316 the data file, and the revision it's based on. Finally, each entry
316 the data file, and the revision it's based on. Finally, each entry
317 contains a linkrev entry that can serve as a pointer to external
317 contains a linkrev entry that can serve as a pointer to external
318 data.
318 data.
319
319
320 The revision data itself is a linear collection of data chunks.
320 The revision data itself is a linear collection of data chunks.
321 Each chunk represents a revision and is usually represented as a
321 Each chunk represents a revision and is usually represented as a
322 delta against the previous chunk. To bound lookup time, runs of
322 delta against the previous chunk. To bound lookup time, runs of
323 deltas are limited to about 2 times the length of the original
323 deltas are limited to about 2 times the length of the original
324 version data. This makes retrieval of a version proportional to
324 version data. This makes retrieval of a version proportional to
325 its size, or O(1) relative to the number of revisions.
325 its size, or O(1) relative to the number of revisions.
326
326
327 Both pieces of the revlog are written to in an append-only
327 Both pieces of the revlog are written to in an append-only
328 fashion, which means we never need to rewrite a file to insert or
328 fashion, which means we never need to rewrite a file to insert or
329 remove data, and can use some simple techniques to avoid the need
329 remove data, and can use some simple techniques to avoid the need
330 for locking while reading.
330 for locking while reading.
331
331
332 If checkambig, indexfile is opened with checkambig=True at
332 If checkambig, indexfile is opened with checkambig=True at
333 writing, to avoid file stat ambiguity.
333 writing, to avoid file stat ambiguity.
334
334
335 If mmaplargeindex is True, and an mmapindexthreshold is set, the
335 If mmaplargeindex is True, and an mmapindexthreshold is set, the
336 index will be mmapped rather than read if it is larger than the
336 index will be mmapped rather than read if it is larger than the
337 configured threshold.
337 configured threshold.
338
338
339 If censorable is True, the revlog can have censored revisions.
339 If censorable is True, the revlog can have censored revisions.
340 """
340 """
341 def __init__(self, opener, indexfile, datafile=None, checkambig=False,
341 def __init__(self, opener, indexfile, datafile=None, checkambig=False,
342 mmaplargeindex=False, censorable=False):
342 mmaplargeindex=False, censorable=False):
343 """
343 """
344 create a revlog object
344 create a revlog object
345
345
346 opener is a function that abstracts the file opening operation
346 opener is a function that abstracts the file opening operation
347 and can be used to implement COW semantics or the like.
347 and can be used to implement COW semantics or the like.
348 """
348 """
349 self.indexfile = indexfile
349 self.indexfile = indexfile
350 self.datafile = datafile or (indexfile[:-2] + ".d")
350 self.datafile = datafile or (indexfile[:-2] + ".d")
351 self.opener = opener
351 self.opener = opener
352 # When True, indexfile is opened with checkambig=True at writing, to
352 # When True, indexfile is opened with checkambig=True at writing, to
353 # avoid file stat ambiguity.
353 # avoid file stat ambiguity.
354 self._checkambig = checkambig
354 self._checkambig = checkambig
355 self._mmaplargeindex = mmaplargeindex
355 self._mmaplargeindex = mmaplargeindex
356 self._censorable = censorable
356 self._censorable = censorable
357 # 3-tuple of (node, rev, text) for a raw revision.
357 # 3-tuple of (node, rev, text) for a raw revision.
358 self._revisioncache = None
358 self._revisioncache = None
359 # Maps rev to chain base rev.
359 # Maps rev to chain base rev.
360 self._chainbasecache = util.lrucachedict(100)
360 self._chainbasecache = util.lrucachedict(100)
361 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
361 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
362 self._chunkcache = (0, '')
362 self._chunkcache = (0, '')
363 # How much data to read and cache into the raw revlog data cache.
363 # How much data to read and cache into the raw revlog data cache.
364 self._chunkcachesize = 65536
364 self._chunkcachesize = 65536
365 self._maxchainlen = None
365 self._maxchainlen = None
366 self._deltabothparents = True
366 self._deltabothparents = True
367 self.index = []
367 self.index = []
368 # Mapping of partial identifiers to full nodes.
368 # Mapping of partial identifiers to full nodes.
369 self._pcache = {}
369 self._pcache = {}
370 # Mapping of revision integer to full node.
370 # Mapping of revision integer to full node.
371 self._nodecache = {nullid: nullrev}
371 self._nodecache = {nullid: nullrev}
372 self._nodepos = None
372 self._nodepos = None
373 self._compengine = 'zlib'
373 self._compengine = 'zlib'
374 self._maxdeltachainspan = -1
374 self._maxdeltachainspan = -1
375 self._withsparseread = False
375 self._withsparseread = False
376 self._sparserevlog = False
376 self._sparserevlog = False
377 self._srdensitythreshold = 0.50
377 self._srdensitythreshold = 0.50
378 self._srmingapsize = 262144
378 self._srmingapsize = 262144
379
379
380 # Make copy of flag processors so each revlog instance can support
380 # Make copy of flag processors so each revlog instance can support
381 # custom flags.
381 # custom flags.
382 self._flagprocessors = dict(_flagprocessors)
382 self._flagprocessors = dict(_flagprocessors)
383
383
384 # 2-tuple of file handles being used for active writing.
384 # 2-tuple of file handles being used for active writing.
385 self._writinghandles = None
385 self._writinghandles = None
386
386
387 self._loadindex()
387 self._loadindex()
388
388
389 def _loadindex(self):
389 def _loadindex(self):
390 mmapindexthreshold = None
390 mmapindexthreshold = None
391 opts = getattr(self.opener, 'options', {}) or {}
391 opts = getattr(self.opener, 'options', {}) or {}
392
392
393 if 'revlogv2' in opts:
393 if 'revlogv2' in opts:
394 newversionflags = REVLOGV2 | FLAG_INLINE_DATA
394 newversionflags = REVLOGV2 | FLAG_INLINE_DATA
395 elif 'revlogv1' in opts:
395 elif 'revlogv1' in opts:
396 newversionflags = REVLOGV1 | FLAG_INLINE_DATA
396 newversionflags = REVLOGV1 | FLAG_INLINE_DATA
397 if 'generaldelta' in opts:
397 if 'generaldelta' in opts:
398 newversionflags |= FLAG_GENERALDELTA
398 newversionflags |= FLAG_GENERALDELTA
399 elif getattr(self.opener, 'options', None) is not None:
399 elif getattr(self.opener, 'options', None) is not None:
400 # If options provided but no 'revlog*' found, the repository
400 # If options provided but no 'revlog*' found, the repository
401 # would have no 'requires' file in it, which means we have to
401 # would have no 'requires' file in it, which means we have to
402 # stick to the old format.
402 # stick to the old format.
403 newversionflags = REVLOGV0
403 newversionflags = REVLOGV0
404 else:
404 else:
405 newversionflags = REVLOG_DEFAULT_VERSION
405 newversionflags = REVLOG_DEFAULT_VERSION
406
406
407 if 'chunkcachesize' in opts:
407 if 'chunkcachesize' in opts:
408 self._chunkcachesize = opts['chunkcachesize']
408 self._chunkcachesize = opts['chunkcachesize']
409 if 'maxchainlen' in opts:
409 if 'maxchainlen' in opts:
410 self._maxchainlen = opts['maxchainlen']
410 self._maxchainlen = opts['maxchainlen']
411 if 'deltabothparents' in opts:
411 if 'deltabothparents' in opts:
412 self._deltabothparents = opts['deltabothparents']
412 self._deltabothparents = opts['deltabothparents']
413 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
413 self._lazydelta = bool(opts.get('lazydelta', True))
414 self._lazydeltabase = False
415 if self._lazydelta:
416 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
414 if 'compengine' in opts:
417 if 'compengine' in opts:
415 self._compengine = opts['compengine']
418 self._compengine = opts['compengine']
416 if 'maxdeltachainspan' in opts:
419 if 'maxdeltachainspan' in opts:
417 self._maxdeltachainspan = opts['maxdeltachainspan']
420 self._maxdeltachainspan = opts['maxdeltachainspan']
418 if self._mmaplargeindex and 'mmapindexthreshold' in opts:
421 if self._mmaplargeindex and 'mmapindexthreshold' in opts:
419 mmapindexthreshold = opts['mmapindexthreshold']
422 mmapindexthreshold = opts['mmapindexthreshold']
420 self._sparserevlog = bool(opts.get('sparse-revlog', False))
423 self._sparserevlog = bool(opts.get('sparse-revlog', False))
421 withsparseread = bool(opts.get('with-sparse-read', False))
424 withsparseread = bool(opts.get('with-sparse-read', False))
422 # sparse-revlog forces sparse-read
425 # sparse-revlog forces sparse-read
423 self._withsparseread = self._sparserevlog or withsparseread
426 self._withsparseread = self._sparserevlog or withsparseread
424 if 'sparse-read-density-threshold' in opts:
427 if 'sparse-read-density-threshold' in opts:
425 self._srdensitythreshold = opts['sparse-read-density-threshold']
428 self._srdensitythreshold = opts['sparse-read-density-threshold']
426 if 'sparse-read-min-gap-size' in opts:
429 if 'sparse-read-min-gap-size' in opts:
427 self._srmingapsize = opts['sparse-read-min-gap-size']
430 self._srmingapsize = opts['sparse-read-min-gap-size']
428 if opts.get('enableellipsis'):
431 if opts.get('enableellipsis'):
429 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
432 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
430
433
431 # revlog v0 doesn't have flag processors
434 # revlog v0 doesn't have flag processors
432 for flag, processor in opts.get(b'flagprocessors', {}).iteritems():
435 for flag, processor in opts.get(b'flagprocessors', {}).iteritems():
433 _insertflagprocessor(flag, processor, self._flagprocessors)
436 _insertflagprocessor(flag, processor, self._flagprocessors)
434
437
435 if self._chunkcachesize <= 0:
438 if self._chunkcachesize <= 0:
436 raise error.RevlogError(_('revlog chunk cache size %r is not '
439 raise error.RevlogError(_('revlog chunk cache size %r is not '
437 'greater than 0') % self._chunkcachesize)
440 'greater than 0') % self._chunkcachesize)
438 elif self._chunkcachesize & (self._chunkcachesize - 1):
441 elif self._chunkcachesize & (self._chunkcachesize - 1):
439 raise error.RevlogError(_('revlog chunk cache size %r is not a '
442 raise error.RevlogError(_('revlog chunk cache size %r is not a '
440 'power of 2') % self._chunkcachesize)
443 'power of 2') % self._chunkcachesize)
441
444
442 indexdata = ''
445 indexdata = ''
443 self._initempty = True
446 self._initempty = True
444 try:
447 try:
445 with self._indexfp() as f:
448 with self._indexfp() as f:
446 if (mmapindexthreshold is not None and
449 if (mmapindexthreshold is not None and
447 self.opener.fstat(f).st_size >= mmapindexthreshold):
450 self.opener.fstat(f).st_size >= mmapindexthreshold):
448 # TODO: should .close() to release resources without
451 # TODO: should .close() to release resources without
449 # relying on Python GC
452 # relying on Python GC
450 indexdata = util.buffer(util.mmapread(f))
453 indexdata = util.buffer(util.mmapread(f))
451 else:
454 else:
452 indexdata = f.read()
455 indexdata = f.read()
453 if len(indexdata) > 0:
456 if len(indexdata) > 0:
454 versionflags = versionformat_unpack(indexdata[:4])[0]
457 versionflags = versionformat_unpack(indexdata[:4])[0]
455 self._initempty = False
458 self._initempty = False
456 else:
459 else:
457 versionflags = newversionflags
460 versionflags = newversionflags
458 except IOError as inst:
461 except IOError as inst:
459 if inst.errno != errno.ENOENT:
462 if inst.errno != errno.ENOENT:
460 raise
463 raise
461
464
462 versionflags = newversionflags
465 versionflags = newversionflags
463
466
464 self.version = versionflags
467 self.version = versionflags
465
468
466 flags = versionflags & ~0xFFFF
469 flags = versionflags & ~0xFFFF
467 fmt = versionflags & 0xFFFF
470 fmt = versionflags & 0xFFFF
468
471
469 if fmt == REVLOGV0:
472 if fmt == REVLOGV0:
470 if flags:
473 if flags:
471 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
474 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
472 'revlog %s') %
475 'revlog %s') %
473 (flags >> 16, fmt, self.indexfile))
476 (flags >> 16, fmt, self.indexfile))
474
477
475 self._inline = False
478 self._inline = False
476 self._generaldelta = False
479 self._generaldelta = False
477
480
478 elif fmt == REVLOGV1:
481 elif fmt == REVLOGV1:
479 if flags & ~REVLOGV1_FLAGS:
482 if flags & ~REVLOGV1_FLAGS:
480 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
483 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
481 'revlog %s') %
484 'revlog %s') %
482 (flags >> 16, fmt, self.indexfile))
485 (flags >> 16, fmt, self.indexfile))
483
486
484 self._inline = versionflags & FLAG_INLINE_DATA
487 self._inline = versionflags & FLAG_INLINE_DATA
485 self._generaldelta = versionflags & FLAG_GENERALDELTA
488 self._generaldelta = versionflags & FLAG_GENERALDELTA
486
489
487 elif fmt == REVLOGV2:
490 elif fmt == REVLOGV2:
488 if flags & ~REVLOGV2_FLAGS:
491 if flags & ~REVLOGV2_FLAGS:
489 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
492 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
490 'revlog %s') %
493 'revlog %s') %
491 (flags >> 16, fmt, self.indexfile))
494 (flags >> 16, fmt, self.indexfile))
492
495
493 self._inline = versionflags & FLAG_INLINE_DATA
496 self._inline = versionflags & FLAG_INLINE_DATA
494 # generaldelta implied by version 2 revlogs.
497 # generaldelta implied by version 2 revlogs.
495 self._generaldelta = True
498 self._generaldelta = True
496
499
497 else:
500 else:
498 raise error.RevlogError(_('unknown version (%d) in revlog %s') %
501 raise error.RevlogError(_('unknown version (%d) in revlog %s') %
499 (fmt, self.indexfile))
502 (fmt, self.indexfile))
500 # sparse-revlog can't be on without general-delta (issue6056)
503 # sparse-revlog can't be on without general-delta (issue6056)
501 if not self._generaldelta:
504 if not self._generaldelta:
502 self._sparserevlog = False
505 self._sparserevlog = False
503
506
504 self._storedeltachains = True
507 self._storedeltachains = True
505
508
506 self._io = revlogio()
509 self._io = revlogio()
507 if self.version == REVLOGV0:
510 if self.version == REVLOGV0:
508 self._io = revlogoldio()
511 self._io = revlogoldio()
509 try:
512 try:
510 d = self._io.parseindex(indexdata, self._inline)
513 d = self._io.parseindex(indexdata, self._inline)
511 except (ValueError, IndexError):
514 except (ValueError, IndexError):
512 raise error.RevlogError(_("index %s is corrupted") %
515 raise error.RevlogError(_("index %s is corrupted") %
513 self.indexfile)
516 self.indexfile)
514 self.index, nodemap, self._chunkcache = d
517 self.index, nodemap, self._chunkcache = d
515 if nodemap is not None:
518 if nodemap is not None:
516 self.nodemap = self._nodecache = nodemap
519 self.nodemap = self._nodecache = nodemap
517 if not self._chunkcache:
520 if not self._chunkcache:
518 self._chunkclear()
521 self._chunkclear()
519 # revnum -> (chain-length, sum-delta-length)
522 # revnum -> (chain-length, sum-delta-length)
520 self._chaininfocache = {}
523 self._chaininfocache = {}
521 # revlog header -> revlog compressor
524 # revlog header -> revlog compressor
522 self._decompressors = {}
525 self._decompressors = {}
523
526
@util.propertycache
def _compressor(self):
    """Compressor object for this revlog's configured engine (lazy)."""
    engine = util.compengines[self._compengine]
    return engine.revlogcompressor()
527
530
def _indexfp(self, mode='r'):
    """Return a file object opened on the revlog's index file.

    Non-read modes enable checkambig to guard against file-stat
    ambiguity; plain write mode additionally uses an atomic temp file.
    """
    kwargs = {r'mode': mode}
    if mode != 'r':
        kwargs[r'checkambig'] = self._checkambig
        if mode == 'w':
            # 'w' implies mode != 'r', so nesting preserves behavior.
            kwargs[r'atomictemp'] = True
    return self.opener(self.indexfile, **kwargs)
536
539
def _datafp(self, mode='r'):
    """Return a file object opened on the revlog's data file."""
    return self.opener(self.datafile, mode=mode)
540
543
@contextlib.contextmanager
def _datareadfp(self, existingfp=None):
    """Context manager yielding a file object suitable to read data."""
    if existingfp is not None:
        # Caller supplied an explicit handle; trust it.
        yield existingfp
    elif self._writinghandles:
        # Reuse a handle currently open for writes. Reads will seek the
        # file, but _writeentry() performs a SEEK_END before every
        # write, so this is safe.
        if self._inline:
            yield self._writinghandles[0]
        else:
            yield self._writinghandles[1]
    else:
        # No usable handle around: open a fresh one on the right file.
        opener = self._indexfp if self._inline else self._datafp
        with opener() as fp:
            yield fp
566
569
def tip(self):
    """Return the node of the tip-most (last) revision."""
    return self.node(len(self.index) - 1)

def __contains__(self, rev):
    """True when ``rev`` is a valid revision number for this revlog."""
    return 0 <= rev < len(self)

def __len__(self):
    """Number of revisions stored."""
    return len(self.index)

def __iter__(self):
    """Iterate revision numbers in ascending order."""
    return iter(pycompat.xrange(len(self)))

def revs(self, start=0, stop=None):
    """iterate over all rev in this revlog (from start to stop)"""
    return storageutil.iterrevs(len(self), start=start, stop=stop)
578
581
@util.propertycache
def nodemap(self):
    """node -> rev mapping, fully populated on first access."""
    if self.index:
        # Looking up the first node populates the mapping all the way
        # down; index[0][7] sidesteps changelog filtering.
        self.rev(self.index[0][7])
    return self._nodecache
586
589
def hasnode(self, node):
    """Return True if ``node`` is known to this revlog."""
    try:
        self.rev(node)
    except KeyError:
        return False
    return True
593
596
def candelta(self, baserev, rev):
    """whether two revisions (baserev, rev) can be delta-ed or not"""
    # A flag processor that rewrites rawtext (e.g. LFS) makes deltas
    # unsafe: two clients could hold the same node with different flags
    # (hence different rawtext), so a delta computed against one rawtext
    # would be invalid for the other.
    for r in (baserev, rev):
        if self.flags(r) & REVIDX_RAWTEXT_CHANGING_FLAGS:
            return False
    return True
605
608
def clearcaches(self):
    """Drop every in-memory cache held by this revlog."""
    self._revisioncache = None
    self._chainbasecache.clear()
    self._chunkcache = (0, '')
    self._pcache = {}

    try:
        # With the native C index, self.index, self.nodemap and
        # self._nodecache are all the very same object.
        self._nodecache.clearcaches()
    except AttributeError:
        # Pure-python fallback: rebuild the minimal node cache.
        self._nodecache = {nullid: nullrev}
        self._nodepos = None
620
623
def rev(self, node):
    """Return the revision number holding ``node``.

    Raises error.WdirUnsupported for working-directory pseudo nodes and
    error.LookupError for unknown nodes.
    """
    try:
        return self._nodecache[node]
    except TypeError:
        raise
    except error.RevlogError:
        # parsers.c radix tree lookup failed
        if node == wdirid or node in wdirfilenodeids:
            raise error.WdirUnsupported
        raise error.LookupError(node, self.indexfile, _('no node'))
    except KeyError:
        # pure python cache lookup failed: scan backwards from the last
        # known position, memoizing every node seen along the way.
        cache = self._nodecache
        index = self.index
        startpos = self._nodepos
        if startpos is None:
            startpos = len(index) - 1
        else:
            assert startpos < len(index)
        for r in pycompat.xrange(startpos, -1, -1):
            candidate = index[r][7]
            cache[candidate] = r
            if candidate == node:
                self._nodepos = r - 1
                return r
        if node == wdirid or node in wdirfilenodeids:
            raise error.WdirUnsupported
        raise error.LookupError(node, self.indexfile, _('no node'))
649
652
# Accessors for index entries.

# First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
# are flags.
def start(self, rev):
    """Byte offset of ``rev``'s compressed data in the data file."""
    return int(self.index[rev][0] >> 16)

def flags(self, rev):
    """Storage flags (low 16 bits of the first index field)."""
    return self.index[rev][0] & 0xFFFF

def length(self, rev):
    """Length of ``rev``'s compressed data."""
    return self.index[rev][1]

def rawsize(self, rev):
    """return the length of the uncompressed text for a given revision"""
    cached = self.index[rev][2]
    if cached >= 0:
        return cached
    # A negative stored size means "unknown": materialize the raw text.
    return len(self.revision(rev, raw=True))

def size(self, rev):
    """length of non-raw text (processed by a "read" flag processor)"""
    # fast path: if no "read" flag processor could change the content,
    # size is rawsize. note: ELLIPSIS is known to not change the content.
    if not (self.flags(rev) & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS)):
        return self.rawsize(rev)

    return len(self.revision(rev, raw=False))
681
684
def chainbase(self, rev):
    """Return the revision where ``rev``'s delta chain bottoms out."""
    cached = self._chainbasecache.get(rev)
    if cached is not None:
        return cached

    index = self.index
    base = rev
    # Walk delta-base pointers until a self-referencing entry (a full
    # snapshot) is reached.
    while index[base][3] != base:
        base = index[base][3]

    self._chainbasecache[rev] = base
    return base
696
699
def linkrev(self, rev):
    """Revision in the linked (changelog) revlog for ``rev``."""
    return self.index[rev][4]

def parentrevs(self, rev):
    """Return the two parent revision numbers of ``rev``."""
    try:
        entry = self.index[rev]
    except IndexError:
        if rev == wdirrev:
            raise error.WdirUnsupported
        raise

    return entry[5], entry[6]

# fast parentrevs(rev) where rev isn't filtered
_uncheckedparentrevs = parentrevs

def node(self, rev):
    """Return the node id stored at revision ``rev``."""
    try:
        return self.index[rev][7]
    except IndexError:
        if rev == wdirrev:
            raise error.WdirUnsupported
        raise
720
723
# Derived from index values.

def end(self, rev):
    """Offset one past the last byte of ``rev``'s compressed data."""
    return self.start(rev) + self.length(rev)

def parents(self, node):
    """Return the parent nodes of ``node``."""
    index = self.index
    entry = index[self.rev(node)]
    # map parent revisions back to nodes inline
    return index[entry[5]][7], index[entry[6]][7]

def chainlen(self, rev):
    """Length of the delta chain ending at ``rev``."""
    return self._chaininfo(rev)[0]
733
736
def _chaininfo(self, rev):
    """Return ``(chain length, total compressed delta size)`` for ``rev``.

    Results are memoized in self._chaininfocache, and partial results
    from the cache are spliced in when the walk reaches a cached rev.
    """
    cache = self._chaininfocache
    if rev in cache:
        return cache[rev]

    index = self.index
    generaldelta = self._generaldelta
    cur = rev
    entry = index[cur]
    clen = 0
    deltassize = 0
    reachedbase = True
    while cur != entry[3]:
        clen += 1
        deltassize += entry[1]
        if generaldelta:
            cur = entry[3]
        else:
            cur -= 1
        if cur in cache:
            # Splice in previously computed results for the remainder of
            # the chain; those already account for the base text.
            prevclen, prevsize = cache[cur]
            clen += prevclen
            deltassize += prevsize
            reachedbase = False
            break
        entry = index[cur]
    if reachedbase:
        # Add text length of base since decompressing that also takes
        # work. For cache hits the length is already included.
        deltassize += entry[1]

    result = (clen, deltassize)
    cache[rev] = result
    return result
764
767
def _deltachain(self, rev, stoprev=None):
    """Obtain the delta chain for a revision.

    ``stoprev`` specifies a revision to stop at. If not specified, we
    stop at the base of the chain.

    Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
    revs in ascending order and ``stopped`` is a bool indicating whether
    ``stoprev`` was hit.
    """
    # Try C implementation.
    try:
        return self.index.deltachain(rev, stoprev, self._generaldelta)
    except AttributeError:
        pass

    # Alias to prevent attribute lookup in tight loop.
    index = self.index
    generaldelta = self._generaldelta

    chain = []
    cur = rev
    entry = index[cur]
    while cur != entry[3] and cur != stoprev:
        chain.append(cur)
        if generaldelta:
            cur = entry[3]
        else:
            cur -= 1
        entry = index[cur]

    stopped = (cur == stoprev)
    if not stopped:
        # We reached the chain base; it is part of the chain itself.
        chain.append(cur)

    chain.reverse()
    return chain, stopped
805
808
def ancestors(self, revs, stoprev=0, inclusive=False):
    """Generate the ancestors of 'revs' in reverse revision order.
    Does not generate revs lower than stoprev.

    See the documentation for ancestor.lazyancestors for more details."""

    # first, make sure start revisions aren't filtered
    revs = list(revs)
    for r in revs:
        self.node(r)
    # and we're sure ancestors aren't filtered as well

    # Pick the fastest available implementation: cext-bound Rust, then
    # the Rust helper exposed through parsers, then pure Python.
    if rustext is not None:
        factory = rustext.ancestor.LazyAncestors
        source = self.index
    elif util.safehasattr(parsers, 'rustlazyancestors'):
        factory = ancestor.rustlazyancestors
        source = self.index
    else:
        factory = ancestor.lazyancestors
        source = self._uncheckedparentrevs
    return factory(source, revs, stoprev=stoprev, inclusive=inclusive)
829
832
def descendants(self, revs):
    """Generate descendant revision numbers of ``revs``."""
    return dagop.descendantrevs(revs, self.revs, self.parentrevs)
832
835
def findcommonmissing(self, common=None, heads=None):
    """Return a tuple of the ancestors of common and the ancestors of heads
    that are not ancestors of common. In revset terminology, we return the
    tuple:

    ::common, (::heads) - (::common)

    The list is sorted by revision number, meaning it is
    topologically sorted.

    'heads' and 'common' are both lists of node IDs. If heads is
    not supplied, uses all of the revlog's heads. If common is not
    supplied, uses nullid."""
    if common is None:
        common = [nullid]
    if heads is None:
        heads = self.heads()

    common = [self.rev(n) for n in common]
    heads = [self.rev(n) for n in heads]

    # we want the ancestors, but inclusive
    class lazyset(object):
        def __init__(self, lazyvalues):
            self.addedvalues = set()
            self.lazyvalues = lazyvalues

        def __contains__(self, value):
            return value in self.addedvalues or value in self.lazyvalues

        def __iter__(self):
            added = self.addedvalues
            for r in added:
                yield r
            for r in self.lazyvalues:
                if r not in added:
                    yield r

        def add(self, value):
            self.addedvalues.add(value)

        def update(self, values):
            self.addedvalues.update(values)

    has = lazyset(self.ancestors(common))
    has.add(nullrev)
    has.update(common)

    # take all ancestors from heads that aren't in has
    missing = set()
    visit = collections.deque(r for r in heads if r not in has)
    while visit:
        r = visit.popleft()
        if r in missing:
            continue
        missing.add(r)
        # Queue parents that aren't already known common ancestors.
        visit.extend(p for p in self.parentrevs(r) if p not in has)

    return has, [self.node(miss) for miss in sorted(missing)]
896
899
def incrementalmissingrevs(self, common=None):
    """Return an object that can be used to incrementally compute the
    revision numbers of the ancestors of arbitrary sets that are not
    ancestors of common. This is an ancestor.incrementalmissingancestors
    object.

    'common' is a list of revision numbers. If common is not supplied, uses
    nullrev.
    """
    if common is None:
        common = [nullrev]

    # Prefer the Rust implementation when the bindings are available.
    if rustext is not None:
        return rustext.ancestor.MissingAncestors(self.index, common)
    return ancestor.incrementalmissingancestors(self.parentrevs, common)
912
915
def findmissingrevs(self, common=None, heads=None):
    """Return the revision numbers of the ancestors of heads that
    are not ancestors of common.

    More specifically, return a list of revision numbers corresponding to
    nodes N such that every N satisfies the following constraints:

    1. N is an ancestor of some node in 'heads'
    2. N is not an ancestor of any node in 'common'

    The list is sorted by revision number, meaning it is
    topologically sorted.

    'heads' and 'common' are both lists of revision numbers. If heads is
    not supplied, uses all of the revlog's heads. If common is not
    supplied, uses nullid."""
    if common is None:
        common = [nullrev]
    if heads is None:
        heads = self.headrevs()

    inc = self.incrementalmissingrevs(common=common)
    return inc.missingancestors(heads)
936
939
def findmissing(self, common=None, heads=None):
    """Return the ancestors of heads that are not ancestors of common.

    More specifically, return a list of nodes N such that every N
    satisfies the following constraints:

    1. N is an ancestor of some node in 'heads'
    2. N is not an ancestor of any node in 'common'

    The list is sorted by revision number, meaning it is
    topologically sorted.

    'heads' and 'common' are both lists of node IDs. If heads is
    not supplied, uses all of the revlog's heads. If common is not
    supplied, uses nullid."""
    if common is None:
        common = [nullid]
    if heads is None:
        heads = self.heads()

    commonrevs = [self.rev(n) for n in common]
    headrevs = [self.rev(n) for n in heads]

    inc = self.incrementalmissingrevs(common=commonrevs)
    return [self.node(r) for r in inc.missingancestors(headrevs)]
962
965
963 def nodesbetween(self, roots=None, heads=None):
966 def nodesbetween(self, roots=None, heads=None):
964 """Return a topological path from 'roots' to 'heads'.
967 """Return a topological path from 'roots' to 'heads'.
965
968
966 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
969 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
967 topologically sorted list of all nodes N that satisfy both of
970 topologically sorted list of all nodes N that satisfy both of
968 these constraints:
971 these constraints:
969
972
970 1. N is a descendant of some node in 'roots'
973 1. N is a descendant of some node in 'roots'
971 2. N is an ancestor of some node in 'heads'
974 2. N is an ancestor of some node in 'heads'
972
975
973 Every node is considered to be both a descendant and an ancestor
976 Every node is considered to be both a descendant and an ancestor
974 of itself, so every reachable node in 'roots' and 'heads' will be
977 of itself, so every reachable node in 'roots' and 'heads' will be
975 included in 'nodes'.
978 included in 'nodes'.
976
979
977 'outroots' is the list of reachable nodes in 'roots', i.e., the
980 'outroots' is the list of reachable nodes in 'roots', i.e., the
978 subset of 'roots' that is returned in 'nodes'. Likewise,
981 subset of 'roots' that is returned in 'nodes'. Likewise,
979 'outheads' is the subset of 'heads' that is also in 'nodes'.
982 'outheads' is the subset of 'heads' that is also in 'nodes'.
980
983
981 'roots' and 'heads' are both lists of node IDs. If 'roots' is
984 'roots' and 'heads' are both lists of node IDs. If 'roots' is
982 unspecified, uses nullid as the only root. If 'heads' is
985 unspecified, uses nullid as the only root. If 'heads' is
983 unspecified, uses list of all of the revlog's heads."""
986 unspecified, uses list of all of the revlog's heads."""
984 nonodes = ([], [], [])
987 nonodes = ([], [], [])
985 if roots is not None:
988 if roots is not None:
986 roots = list(roots)
989 roots = list(roots)
987 if not roots:
990 if not roots:
988 return nonodes
991 return nonodes
989 lowestrev = min([self.rev(n) for n in roots])
992 lowestrev = min([self.rev(n) for n in roots])
990 else:
993 else:
991 roots = [nullid] # Everybody's a descendant of nullid
994 roots = [nullid] # Everybody's a descendant of nullid
992 lowestrev = nullrev
995 lowestrev = nullrev
993 if (lowestrev == nullrev) and (heads is None):
996 if (lowestrev == nullrev) and (heads is None):
994 # We want _all_ the nodes!
997 # We want _all_ the nodes!
995 return ([self.node(r) for r in self], [nullid], list(self.heads()))
998 return ([self.node(r) for r in self], [nullid], list(self.heads()))
996 if heads is None:
999 if heads is None:
997 # All nodes are ancestors, so the latest ancestor is the last
1000 # All nodes are ancestors, so the latest ancestor is the last
998 # node.
1001 # node.
999 highestrev = len(self) - 1
1002 highestrev = len(self) - 1
1000 # Set ancestors to None to signal that every node is an ancestor.
1003 # Set ancestors to None to signal that every node is an ancestor.
1001 ancestors = None
1004 ancestors = None
1002 # Set heads to an empty dictionary for later discovery of heads
1005 # Set heads to an empty dictionary for later discovery of heads
1003 heads = {}
1006 heads = {}
1004 else:
1007 else:
1005 heads = list(heads)
1008 heads = list(heads)
1006 if not heads:
1009 if not heads:
1007 return nonodes
1010 return nonodes
1008 ancestors = set()
1011 ancestors = set()
1009 # Turn heads into a dictionary so we can remove 'fake' heads.
1012 # Turn heads into a dictionary so we can remove 'fake' heads.
1010 # Also, later we will be using it to filter out the heads we can't
1013 # Also, later we will be using it to filter out the heads we can't
1011 # find from roots.
1014 # find from roots.
1012 heads = dict.fromkeys(heads, False)
1015 heads = dict.fromkeys(heads, False)
1013 # Start at the top and keep marking parents until we're done.
1016 # Start at the top and keep marking parents until we're done.
1014 nodestotag = set(heads)
1017 nodestotag = set(heads)
1015 # Remember where the top was so we can use it as a limit later.
1018 # Remember where the top was so we can use it as a limit later.
1016 highestrev = max([self.rev(n) for n in nodestotag])
1019 highestrev = max([self.rev(n) for n in nodestotag])
1017 while nodestotag:
1020 while nodestotag:
1018 # grab a node to tag
1021 # grab a node to tag
1019 n = nodestotag.pop()
1022 n = nodestotag.pop()
1020 # Never tag nullid
1023 # Never tag nullid
1021 if n == nullid:
1024 if n == nullid:
1022 continue
1025 continue
1023 # A node's revision number represents its place in a
1026 # A node's revision number represents its place in a
1024 # topologically sorted list of nodes.
1027 # topologically sorted list of nodes.
1025 r = self.rev(n)
1028 r = self.rev(n)
1026 if r >= lowestrev:
1029 if r >= lowestrev:
1027 if n not in ancestors:
1030 if n not in ancestors:
1028 # If we are possibly a descendant of one of the roots
1031 # If we are possibly a descendant of one of the roots
1029 # and we haven't already been marked as an ancestor
1032 # and we haven't already been marked as an ancestor
1030 ancestors.add(n) # Mark as ancestor
1033 ancestors.add(n) # Mark as ancestor
1031 # Add non-nullid parents to list of nodes to tag.
1034 # Add non-nullid parents to list of nodes to tag.
1032 nodestotag.update([p for p in self.parents(n) if
1035 nodestotag.update([p for p in self.parents(n) if
1033 p != nullid])
1036 p != nullid])
1034 elif n in heads: # We've seen it before, is it a fake head?
1037 elif n in heads: # We've seen it before, is it a fake head?
1035 # So it is, real heads should not be the ancestors of
1038 # So it is, real heads should not be the ancestors of
1036 # any other heads.
1039 # any other heads.
1037 heads.pop(n)
1040 heads.pop(n)
1038 if not ancestors:
1041 if not ancestors:
1039 return nonodes
1042 return nonodes
1040 # Now that we have our set of ancestors, we want to remove any
1043 # Now that we have our set of ancestors, we want to remove any
1041 # roots that are not ancestors.
1044 # roots that are not ancestors.
1042
1045
1043 # If one of the roots was nullid, everything is included anyway.
1046 # If one of the roots was nullid, everything is included anyway.
1044 if lowestrev > nullrev:
1047 if lowestrev > nullrev:
1045 # But, since we weren't, let's recompute the lowest rev to not
1048 # But, since we weren't, let's recompute the lowest rev to not
1046 # include roots that aren't ancestors.
1049 # include roots that aren't ancestors.
1047
1050
1048 # Filter out roots that aren't ancestors of heads
1051 # Filter out roots that aren't ancestors of heads
1049 roots = [root for root in roots if root in ancestors]
1052 roots = [root for root in roots if root in ancestors]
1050 # Recompute the lowest revision
1053 # Recompute the lowest revision
1051 if roots:
1054 if roots:
1052 lowestrev = min([self.rev(root) for root in roots])
1055 lowestrev = min([self.rev(root) for root in roots])
1053 else:
1056 else:
1054 # No more roots? Return empty list
1057 # No more roots? Return empty list
1055 return nonodes
1058 return nonodes
1056 else:
1059 else:
1057 # We are descending from nullid, and don't need to care about
1060 # We are descending from nullid, and don't need to care about
1058 # any other roots.
1061 # any other roots.
1059 lowestrev = nullrev
1062 lowestrev = nullrev
1060 roots = [nullid]
1063 roots = [nullid]
1061 # Transform our roots list into a set.
1064 # Transform our roots list into a set.
1062 descendants = set(roots)
1065 descendants = set(roots)
1063 # Also, keep the original roots so we can filter out roots that aren't
1066 # Also, keep the original roots so we can filter out roots that aren't
1064 # 'real' roots (i.e. are descended from other roots).
1067 # 'real' roots (i.e. are descended from other roots).
1065 roots = descendants.copy()
1068 roots = descendants.copy()
1066 # Our topologically sorted list of output nodes.
1069 # Our topologically sorted list of output nodes.
1067 orderedout = []
1070 orderedout = []
1068 # Don't start at nullid since we don't want nullid in our output list,
1071 # Don't start at nullid since we don't want nullid in our output list,
1069 # and if nullid shows up in descendants, empty parents will look like
1072 # and if nullid shows up in descendants, empty parents will look like
1070 # they're descendants.
1073 # they're descendants.
1071 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1074 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1072 n = self.node(r)
1075 n = self.node(r)
1073 isdescendant = False
1076 isdescendant = False
1074 if lowestrev == nullrev: # Everybody is a descendant of nullid
1077 if lowestrev == nullrev: # Everybody is a descendant of nullid
1075 isdescendant = True
1078 isdescendant = True
1076 elif n in descendants:
1079 elif n in descendants:
1077 # n is already a descendant
1080 # n is already a descendant
1078 isdescendant = True
1081 isdescendant = True
1079 # This check only needs to be done here because all the roots
1082 # This check only needs to be done here because all the roots
1080 # will start being marked is descendants before the loop.
1083 # will start being marked is descendants before the loop.
1081 if n in roots:
1084 if n in roots:
1082 # If n was a root, check if it's a 'real' root.
1085 # If n was a root, check if it's a 'real' root.
1083 p = tuple(self.parents(n))
1086 p = tuple(self.parents(n))
1084 # If any of its parents are descendants, it's not a root.
1087 # If any of its parents are descendants, it's not a root.
1085 if (p[0] in descendants) or (p[1] in descendants):
1088 if (p[0] in descendants) or (p[1] in descendants):
1086 roots.remove(n)
1089 roots.remove(n)
1087 else:
1090 else:
1088 p = tuple(self.parents(n))
1091 p = tuple(self.parents(n))
1089 # A node is a descendant if either of its parents are
1092 # A node is a descendant if either of its parents are
1090 # descendants. (We seeded the dependents list with the roots
1093 # descendants. (We seeded the dependents list with the roots
1091 # up there, remember?)
1094 # up there, remember?)
1092 if (p[0] in descendants) or (p[1] in descendants):
1095 if (p[0] in descendants) or (p[1] in descendants):
1093 descendants.add(n)
1096 descendants.add(n)
1094 isdescendant = True
1097 isdescendant = True
1095 if isdescendant and ((ancestors is None) or (n in ancestors)):
1098 if isdescendant and ((ancestors is None) or (n in ancestors)):
1096 # Only include nodes that are both descendants and ancestors.
1099 # Only include nodes that are both descendants and ancestors.
1097 orderedout.append(n)
1100 orderedout.append(n)
1098 if (ancestors is not None) and (n in heads):
1101 if (ancestors is not None) and (n in heads):
1099 # We're trying to figure out which heads are reachable
1102 # We're trying to figure out which heads are reachable
1100 # from roots.
1103 # from roots.
1101 # Mark this head as having been reached
1104 # Mark this head as having been reached
1102 heads[n] = True
1105 heads[n] = True
1103 elif ancestors is None:
1106 elif ancestors is None:
1104 # Otherwise, we're trying to discover the heads.
1107 # Otherwise, we're trying to discover the heads.
1105 # Assume this is a head because if it isn't, the next step
1108 # Assume this is a head because if it isn't, the next step
1106 # will eventually remove it.
1109 # will eventually remove it.
1107 heads[n] = True
1110 heads[n] = True
1108 # But, obviously its parents aren't.
1111 # But, obviously its parents aren't.
1109 for p in self.parents(n):
1112 for p in self.parents(n):
1110 heads.pop(p, None)
1113 heads.pop(p, None)
1111 heads = [head for head, flag in heads.iteritems() if flag]
1114 heads = [head for head, flag in heads.iteritems() if flag]
1112 roots = list(roots)
1115 roots = list(roots)
1113 assert orderedout
1116 assert orderedout
1114 assert roots
1117 assert roots
1115 assert heads
1118 assert heads
1116 return (orderedout, roots, heads)
1119 return (orderedout, roots, heads)
1117
1120
1118 def headrevs(self, revs=None):
1121 def headrevs(self, revs=None):
1119 if revs is None:
1122 if revs is None:
1120 try:
1123 try:
1121 return self.index.headrevs()
1124 return self.index.headrevs()
1122 except AttributeError:
1125 except AttributeError:
1123 return self._headrevs()
1126 return self._headrevs()
1124 if rustext is not None:
1127 if rustext is not None:
1125 return rustext.dagop.headrevs(self.index, revs)
1128 return rustext.dagop.headrevs(self.index, revs)
1126 return dagop.headrevs(revs, self._uncheckedparentrevs)
1129 return dagop.headrevs(revs, self._uncheckedparentrevs)
1127
1130
1128 def computephases(self, roots):
1131 def computephases(self, roots):
1129 return self.index.computephasesmapsets(roots)
1132 return self.index.computephasesmapsets(roots)
1130
1133
1131 def _headrevs(self):
1134 def _headrevs(self):
1132 count = len(self)
1135 count = len(self)
1133 if not count:
1136 if not count:
1134 return [nullrev]
1137 return [nullrev]
1135 # we won't iter over filtered rev so nobody is a head at start
1138 # we won't iter over filtered rev so nobody is a head at start
1136 ishead = [0] * (count + 1)
1139 ishead = [0] * (count + 1)
1137 index = self.index
1140 index = self.index
1138 for r in self:
1141 for r in self:
1139 ishead[r] = 1 # I may be an head
1142 ishead[r] = 1 # I may be an head
1140 e = index[r]
1143 e = index[r]
1141 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1144 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1142 return [r for r, val in enumerate(ishead) if val]
1145 return [r for r, val in enumerate(ishead) if val]
1143
1146
1144 def heads(self, start=None, stop=None):
1147 def heads(self, start=None, stop=None):
1145 """return the list of all nodes that have no children
1148 """return the list of all nodes that have no children
1146
1149
1147 if start is specified, only heads that are descendants of
1150 if start is specified, only heads that are descendants of
1148 start will be returned
1151 start will be returned
1149 if stop is specified, it will consider all the revs from stop
1152 if stop is specified, it will consider all the revs from stop
1150 as if they had no children
1153 as if they had no children
1151 """
1154 """
1152 if start is None and stop is None:
1155 if start is None and stop is None:
1153 if not len(self):
1156 if not len(self):
1154 return [nullid]
1157 return [nullid]
1155 return [self.node(r) for r in self.headrevs()]
1158 return [self.node(r) for r in self.headrevs()]
1156
1159
1157 if start is None:
1160 if start is None:
1158 start = nullrev
1161 start = nullrev
1159 else:
1162 else:
1160 start = self.rev(start)
1163 start = self.rev(start)
1161
1164
1162 stoprevs = set(self.rev(n) for n in stop or [])
1165 stoprevs = set(self.rev(n) for n in stop or [])
1163
1166
1164 revs = dagop.headrevssubset(self.revs, self.parentrevs, startrev=start,
1167 revs = dagop.headrevssubset(self.revs, self.parentrevs, startrev=start,
1165 stoprevs=stoprevs)
1168 stoprevs=stoprevs)
1166
1169
1167 return [self.node(rev) for rev in revs]
1170 return [self.node(rev) for rev in revs]
1168
1171
1169 def children(self, node):
1172 def children(self, node):
1170 """find the children of a given node"""
1173 """find the children of a given node"""
1171 c = []
1174 c = []
1172 p = self.rev(node)
1175 p = self.rev(node)
1173 for r in self.revs(start=p + 1):
1176 for r in self.revs(start=p + 1):
1174 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1177 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1175 if prevs:
1178 if prevs:
1176 for pr in prevs:
1179 for pr in prevs:
1177 if pr == p:
1180 if pr == p:
1178 c.append(self.node(r))
1181 c.append(self.node(r))
1179 elif p == nullrev:
1182 elif p == nullrev:
1180 c.append(self.node(r))
1183 c.append(self.node(r))
1181 return c
1184 return c
1182
1185
1183 def commonancestorsheads(self, a, b):
1186 def commonancestorsheads(self, a, b):
1184 """calculate all the heads of the common ancestors of nodes a and b"""
1187 """calculate all the heads of the common ancestors of nodes a and b"""
1185 a, b = self.rev(a), self.rev(b)
1188 a, b = self.rev(a), self.rev(b)
1186 ancs = self._commonancestorsheads(a, b)
1189 ancs = self._commonancestorsheads(a, b)
1187 return pycompat.maplist(self.node, ancs)
1190 return pycompat.maplist(self.node, ancs)
1188
1191
1189 def _commonancestorsheads(self, *revs):
1192 def _commonancestorsheads(self, *revs):
1190 """calculate all the heads of the common ancestors of revs"""
1193 """calculate all the heads of the common ancestors of revs"""
1191 try:
1194 try:
1192 ancs = self.index.commonancestorsheads(*revs)
1195 ancs = self.index.commonancestorsheads(*revs)
1193 except (AttributeError, OverflowError): # C implementation failed
1196 except (AttributeError, OverflowError): # C implementation failed
1194 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1197 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1195 return ancs
1198 return ancs
1196
1199
1197 def isancestor(self, a, b):
1200 def isancestor(self, a, b):
1198 """return True if node a is an ancestor of node b
1201 """return True if node a is an ancestor of node b
1199
1202
1200 A revision is considered an ancestor of itself."""
1203 A revision is considered an ancestor of itself."""
1201 a, b = self.rev(a), self.rev(b)
1204 a, b = self.rev(a), self.rev(b)
1202 return self.isancestorrev(a, b)
1205 return self.isancestorrev(a, b)
1203
1206
1204 def isancestorrev(self, a, b):
1207 def isancestorrev(self, a, b):
1205 """return True if revision a is an ancestor of revision b
1208 """return True if revision a is an ancestor of revision b
1206
1209
1207 A revision is considered an ancestor of itself.
1210 A revision is considered an ancestor of itself.
1208
1211
1209 The implementation of this is trivial but the use of
1212 The implementation of this is trivial but the use of
1210 commonancestorsheads is not."""
1213 commonancestorsheads is not."""
1211 if a == nullrev:
1214 if a == nullrev:
1212 return True
1215 return True
1213 elif a == b:
1216 elif a == b:
1214 return True
1217 return True
1215 elif a > b:
1218 elif a > b:
1216 return False
1219 return False
1217 return a in self._commonancestorsheads(a, b)
1220 return a in self._commonancestorsheads(a, b)
1218
1221
1219 def ancestor(self, a, b):
1222 def ancestor(self, a, b):
1220 """calculate the "best" common ancestor of nodes a and b"""
1223 """calculate the "best" common ancestor of nodes a and b"""
1221
1224
1222 a, b = self.rev(a), self.rev(b)
1225 a, b = self.rev(a), self.rev(b)
1223 try:
1226 try:
1224 ancs = self.index.ancestors(a, b)
1227 ancs = self.index.ancestors(a, b)
1225 except (AttributeError, OverflowError):
1228 except (AttributeError, OverflowError):
1226 ancs = ancestor.ancestors(self.parentrevs, a, b)
1229 ancs = ancestor.ancestors(self.parentrevs, a, b)
1227 if ancs:
1230 if ancs:
1228 # choose a consistent winner when there's a tie
1231 # choose a consistent winner when there's a tie
1229 return min(map(self.node, ancs))
1232 return min(map(self.node, ancs))
1230 return nullid
1233 return nullid
1231
1234
1232 def _match(self, id):
1235 def _match(self, id):
1233 if isinstance(id, int):
1236 if isinstance(id, int):
1234 # rev
1237 # rev
1235 return self.node(id)
1238 return self.node(id)
1236 if len(id) == 20:
1239 if len(id) == 20:
1237 # possibly a binary node
1240 # possibly a binary node
1238 # odds of a binary node being all hex in ASCII are 1 in 10**25
1241 # odds of a binary node being all hex in ASCII are 1 in 10**25
1239 try:
1242 try:
1240 node = id
1243 node = id
1241 self.rev(node) # quick search the index
1244 self.rev(node) # quick search the index
1242 return node
1245 return node
1243 except error.LookupError:
1246 except error.LookupError:
1244 pass # may be partial hex id
1247 pass # may be partial hex id
1245 try:
1248 try:
1246 # str(rev)
1249 # str(rev)
1247 rev = int(id)
1250 rev = int(id)
1248 if "%d" % rev != id:
1251 if "%d" % rev != id:
1249 raise ValueError
1252 raise ValueError
1250 if rev < 0:
1253 if rev < 0:
1251 rev = len(self) + rev
1254 rev = len(self) + rev
1252 if rev < 0 or rev >= len(self):
1255 if rev < 0 or rev >= len(self):
1253 raise ValueError
1256 raise ValueError
1254 return self.node(rev)
1257 return self.node(rev)
1255 except (ValueError, OverflowError):
1258 except (ValueError, OverflowError):
1256 pass
1259 pass
1257 if len(id) == 40:
1260 if len(id) == 40:
1258 try:
1261 try:
1259 # a full hex nodeid?
1262 # a full hex nodeid?
1260 node = bin(id)
1263 node = bin(id)
1261 self.rev(node)
1264 self.rev(node)
1262 return node
1265 return node
1263 except (TypeError, error.LookupError):
1266 except (TypeError, error.LookupError):
1264 pass
1267 pass
1265
1268
1266 def _partialmatch(self, id):
1269 def _partialmatch(self, id):
1267 # we don't care wdirfilenodeids as they should be always full hash
1270 # we don't care wdirfilenodeids as they should be always full hash
1268 maybewdir = wdirhex.startswith(id)
1271 maybewdir = wdirhex.startswith(id)
1269 try:
1272 try:
1270 partial = self.index.partialmatch(id)
1273 partial = self.index.partialmatch(id)
1271 if partial and self.hasnode(partial):
1274 if partial and self.hasnode(partial):
1272 if maybewdir:
1275 if maybewdir:
1273 # single 'ff...' match in radix tree, ambiguous with wdir
1276 # single 'ff...' match in radix tree, ambiguous with wdir
1274 raise error.RevlogError
1277 raise error.RevlogError
1275 return partial
1278 return partial
1276 if maybewdir:
1279 if maybewdir:
1277 # no 'ff...' match in radix tree, wdir identified
1280 # no 'ff...' match in radix tree, wdir identified
1278 raise error.WdirUnsupported
1281 raise error.WdirUnsupported
1279 return None
1282 return None
1280 except error.RevlogError:
1283 except error.RevlogError:
1281 # parsers.c radix tree lookup gave multiple matches
1284 # parsers.c radix tree lookup gave multiple matches
1282 # fast path: for unfiltered changelog, radix tree is accurate
1285 # fast path: for unfiltered changelog, radix tree is accurate
1283 if not getattr(self, 'filteredrevs', None):
1286 if not getattr(self, 'filteredrevs', None):
1284 raise error.AmbiguousPrefixLookupError(
1287 raise error.AmbiguousPrefixLookupError(
1285 id, self.indexfile, _('ambiguous identifier'))
1288 id, self.indexfile, _('ambiguous identifier'))
1286 # fall through to slow path that filters hidden revisions
1289 # fall through to slow path that filters hidden revisions
1287 except (AttributeError, ValueError):
1290 except (AttributeError, ValueError):
1288 # we are pure python, or key was too short to search radix tree
1291 # we are pure python, or key was too short to search radix tree
1289 pass
1292 pass
1290
1293
1291 if id in self._pcache:
1294 if id in self._pcache:
1292 return self._pcache[id]
1295 return self._pcache[id]
1293
1296
1294 if len(id) <= 40:
1297 if len(id) <= 40:
1295 try:
1298 try:
1296 # hex(node)[:...]
1299 # hex(node)[:...]
1297 l = len(id) // 2 # grab an even number of digits
1300 l = len(id) // 2 # grab an even number of digits
1298 prefix = bin(id[:l * 2])
1301 prefix = bin(id[:l * 2])
1299 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1302 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1300 nl = [n for n in nl if hex(n).startswith(id) and
1303 nl = [n for n in nl if hex(n).startswith(id) and
1301 self.hasnode(n)]
1304 self.hasnode(n)]
1302 if nullhex.startswith(id):
1305 if nullhex.startswith(id):
1303 nl.append(nullid)
1306 nl.append(nullid)
1304 if len(nl) > 0:
1307 if len(nl) > 0:
1305 if len(nl) == 1 and not maybewdir:
1308 if len(nl) == 1 and not maybewdir:
1306 self._pcache[id] = nl[0]
1309 self._pcache[id] = nl[0]
1307 return nl[0]
1310 return nl[0]
1308 raise error.AmbiguousPrefixLookupError(
1311 raise error.AmbiguousPrefixLookupError(
1309 id, self.indexfile, _('ambiguous identifier'))
1312 id, self.indexfile, _('ambiguous identifier'))
1310 if maybewdir:
1313 if maybewdir:
1311 raise error.WdirUnsupported
1314 raise error.WdirUnsupported
1312 return None
1315 return None
1313 except TypeError:
1316 except TypeError:
1314 pass
1317 pass
1315
1318
1316 def lookup(self, id):
1319 def lookup(self, id):
1317 """locate a node based on:
1320 """locate a node based on:
1318 - revision number or str(revision number)
1321 - revision number or str(revision number)
1319 - nodeid or subset of hex nodeid
1322 - nodeid or subset of hex nodeid
1320 """
1323 """
1321 n = self._match(id)
1324 n = self._match(id)
1322 if n is not None:
1325 if n is not None:
1323 return n
1326 return n
1324 n = self._partialmatch(id)
1327 n = self._partialmatch(id)
1325 if n:
1328 if n:
1326 return n
1329 return n
1327
1330
1328 raise error.LookupError(id, self.indexfile, _('no match found'))
1331 raise error.LookupError(id, self.indexfile, _('no match found'))
1329
1332
1330 def shortest(self, node, minlength=1):
1333 def shortest(self, node, minlength=1):
1331 """Find the shortest unambiguous prefix that matches node."""
1334 """Find the shortest unambiguous prefix that matches node."""
1332 def isvalid(prefix):
1335 def isvalid(prefix):
1333 try:
1336 try:
1334 node = self._partialmatch(prefix)
1337 node = self._partialmatch(prefix)
1335 except error.AmbiguousPrefixLookupError:
1338 except error.AmbiguousPrefixLookupError:
1336 return False
1339 return False
1337 except error.WdirUnsupported:
1340 except error.WdirUnsupported:
1338 # single 'ff...' match
1341 # single 'ff...' match
1339 return True
1342 return True
1340 if node is None:
1343 if node is None:
1341 raise error.LookupError(node, self.indexfile, _('no node'))
1344 raise error.LookupError(node, self.indexfile, _('no node'))
1342 return True
1345 return True
1343
1346
1344 def maybewdir(prefix):
1347 def maybewdir(prefix):
1345 return all(c == 'f' for c in pycompat.iterbytestr(prefix))
1348 return all(c == 'f' for c in pycompat.iterbytestr(prefix))
1346
1349
1347 hexnode = hex(node)
1350 hexnode = hex(node)
1348
1351
1349 def disambiguate(hexnode, minlength):
1352 def disambiguate(hexnode, minlength):
1350 """Disambiguate against wdirid."""
1353 """Disambiguate against wdirid."""
1351 for length in range(minlength, 41):
1354 for length in range(minlength, 41):
1352 prefix = hexnode[:length]
1355 prefix = hexnode[:length]
1353 if not maybewdir(prefix):
1356 if not maybewdir(prefix):
1354 return prefix
1357 return prefix
1355
1358
1356 if not getattr(self, 'filteredrevs', None):
1359 if not getattr(self, 'filteredrevs', None):
1357 try:
1360 try:
1358 length = max(self.index.shortest(node), minlength)
1361 length = max(self.index.shortest(node), minlength)
1359 return disambiguate(hexnode, length)
1362 return disambiguate(hexnode, length)
1360 except error.RevlogError:
1363 except error.RevlogError:
1361 if node != wdirid:
1364 if node != wdirid:
1362 raise error.LookupError(node, self.indexfile, _('no node'))
1365 raise error.LookupError(node, self.indexfile, _('no node'))
1363 except AttributeError:
1366 except AttributeError:
1364 # Fall through to pure code
1367 # Fall through to pure code
1365 pass
1368 pass
1366
1369
1367 if node == wdirid:
1370 if node == wdirid:
1368 for length in range(minlength, 41):
1371 for length in range(minlength, 41):
1369 prefix = hexnode[:length]
1372 prefix = hexnode[:length]
1370 if isvalid(prefix):
1373 if isvalid(prefix):
1371 return prefix
1374 return prefix
1372
1375
1373 for length in range(minlength, 41):
1376 for length in range(minlength, 41):
1374 prefix = hexnode[:length]
1377 prefix = hexnode[:length]
1375 if isvalid(prefix):
1378 if isvalid(prefix):
1376 return disambiguate(hexnode, length)
1379 return disambiguate(hexnode, length)
1377
1380
1378 def cmp(self, node, text):
1381 def cmp(self, node, text):
1379 """compare text with a given file revision
1382 """compare text with a given file revision
1380
1383
1381 returns True if text is different than what is stored.
1384 returns True if text is different than what is stored.
1382 """
1385 """
1383 p1, p2 = self.parents(node)
1386 p1, p2 = self.parents(node)
1384 return storageutil.hashrevisionsha1(text, p1, p2) != node
1387 return storageutil.hashrevisionsha1(text, p1, p2) != node
1385
1388
1386 def _cachesegment(self, offset, data):
1389 def _cachesegment(self, offset, data):
1387 """Add a segment to the revlog cache.
1390 """Add a segment to the revlog cache.
1388
1391
1389 Accepts an absolute offset and the data that is at that location.
1392 Accepts an absolute offset and the data that is at that location.
1390 """
1393 """
1391 o, d = self._chunkcache
1394 o, d = self._chunkcache
1392 # try to add to existing cache
1395 # try to add to existing cache
1393 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1396 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1394 self._chunkcache = o, d + data
1397 self._chunkcache = o, d + data
1395 else:
1398 else:
1396 self._chunkcache = offset, data
1399 self._chunkcache = offset, data
1397
1400
1398 def _readsegment(self, offset, length, df=None):
1401 def _readsegment(self, offset, length, df=None):
1399 """Load a segment of raw data from the revlog.
1402 """Load a segment of raw data from the revlog.
1400
1403
1401 Accepts an absolute offset, length to read, and an optional existing
1404 Accepts an absolute offset, length to read, and an optional existing
1402 file handle to read from.
1405 file handle to read from.
1403
1406
1404 If an existing file handle is passed, it will be seeked and the
1407 If an existing file handle is passed, it will be seeked and the
1405 original seek position will NOT be restored.
1408 original seek position will NOT be restored.
1406
1409
1407 Returns a str or buffer of raw byte data.
1410 Returns a str or buffer of raw byte data.
1408
1411
1409 Raises if the requested number of bytes could not be read.
1412 Raises if the requested number of bytes could not be read.
1410 """
1413 """
1411 # Cache data both forward and backward around the requested
1414 # Cache data both forward and backward around the requested
1412 # data, in a fixed size window. This helps speed up operations
1415 # data, in a fixed size window. This helps speed up operations
1413 # involving reading the revlog backwards.
1416 # involving reading the revlog backwards.
1414 cachesize = self._chunkcachesize
1417 cachesize = self._chunkcachesize
1415 realoffset = offset & ~(cachesize - 1)
1418 realoffset = offset & ~(cachesize - 1)
1416 reallength = (((offset + length + cachesize) & ~(cachesize - 1))
1419 reallength = (((offset + length + cachesize) & ~(cachesize - 1))
1417 - realoffset)
1420 - realoffset)
1418 with self._datareadfp(df) as df:
1421 with self._datareadfp(df) as df:
1419 df.seek(realoffset)
1422 df.seek(realoffset)
1420 d = df.read(reallength)
1423 d = df.read(reallength)
1421
1424
1422 self._cachesegment(realoffset, d)
1425 self._cachesegment(realoffset, d)
1423 if offset != realoffset or reallength != length:
1426 if offset != realoffset or reallength != length:
1424 startoffset = offset - realoffset
1427 startoffset = offset - realoffset
1425 if len(d) - startoffset < length:
1428 if len(d) - startoffset < length:
1426 raise error.RevlogError(
1429 raise error.RevlogError(
1427 _('partial read of revlog %s; expected %d bytes from '
1430 _('partial read of revlog %s; expected %d bytes from '
1428 'offset %d, got %d') %
1431 'offset %d, got %d') %
1429 (self.indexfile if self._inline else self.datafile,
1432 (self.indexfile if self._inline else self.datafile,
1430 length, realoffset, len(d) - startoffset))
1433 length, realoffset, len(d) - startoffset))
1431
1434
1432 return util.buffer(d, startoffset, length)
1435 return util.buffer(d, startoffset, length)
1433
1436
1434 if len(d) < length:
1437 if len(d) < length:
1435 raise error.RevlogError(
1438 raise error.RevlogError(
1436 _('partial read of revlog %s; expected %d bytes from offset '
1439 _('partial read of revlog %s; expected %d bytes from offset '
1437 '%d, got %d') %
1440 '%d, got %d') %
1438 (self.indexfile if self._inline else self.datafile,
1441 (self.indexfile if self._inline else self.datafile,
1439 length, offset, len(d)))
1442 length, offset, len(d)))
1440
1443
1441 return d
1444 return d
1442
1445
1443 def _getsegment(self, offset, length, df=None):
1446 def _getsegment(self, offset, length, df=None):
1444 """Obtain a segment of raw data from the revlog.
1447 """Obtain a segment of raw data from the revlog.
1445
1448
1446 Accepts an absolute offset, length of bytes to obtain, and an
1449 Accepts an absolute offset, length of bytes to obtain, and an
1447 optional file handle to the already-opened revlog. If the file
1450 optional file handle to the already-opened revlog. If the file
1448 handle is used, it's original seek position will not be preserved.
1451 handle is used, it's original seek position will not be preserved.
1449
1452
1450 Requests for data may be returned from a cache.
1453 Requests for data may be returned from a cache.
1451
1454
1452 Returns a str or a buffer instance of raw byte data.
1455 Returns a str or a buffer instance of raw byte data.
1453 """
1456 """
1454 o, d = self._chunkcache
1457 o, d = self._chunkcache
1455 l = len(d)
1458 l = len(d)
1456
1459
1457 # is it in the cache?
1460 # is it in the cache?
1458 cachestart = offset - o
1461 cachestart = offset - o
1459 cacheend = cachestart + length
1462 cacheend = cachestart + length
1460 if cachestart >= 0 and cacheend <= l:
1463 if cachestart >= 0 and cacheend <= l:
1461 if cachestart == 0 and cacheend == l:
1464 if cachestart == 0 and cacheend == l:
1462 return d # avoid a copy
1465 return d # avoid a copy
1463 return util.buffer(d, cachestart, cacheend - cachestart)
1466 return util.buffer(d, cachestart, cacheend - cachestart)
1464
1467
1465 return self._readsegment(offset, length, df=df)
1468 return self._readsegment(offset, length, df=df)
1466
1469
1467 def _getsegmentforrevs(self, startrev, endrev, df=None):
1470 def _getsegmentforrevs(self, startrev, endrev, df=None):
1468 """Obtain a segment of raw data corresponding to a range of revisions.
1471 """Obtain a segment of raw data corresponding to a range of revisions.
1469
1472
1470 Accepts the start and end revisions and an optional already-open
1473 Accepts the start and end revisions and an optional already-open
1471 file handle to be used for reading. If the file handle is read, its
1474 file handle to be used for reading. If the file handle is read, its
1472 seek position will not be preserved.
1475 seek position will not be preserved.
1473
1476
1474 Requests for data may be satisfied by a cache.
1477 Requests for data may be satisfied by a cache.
1475
1478
1476 Returns a 2-tuple of (offset, data) for the requested range of
1479 Returns a 2-tuple of (offset, data) for the requested range of
1477 revisions. Offset is the integer offset from the beginning of the
1480 revisions. Offset is the integer offset from the beginning of the
1478 revlog and data is a str or buffer of the raw byte data.
1481 revlog and data is a str or buffer of the raw byte data.
1479
1482
1480 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1483 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1481 to determine where each revision's data begins and ends.
1484 to determine where each revision's data begins and ends.
1482 """
1485 """
1483 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1486 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1484 # (functions are expensive).
1487 # (functions are expensive).
1485 index = self.index
1488 index = self.index
1486 istart = index[startrev]
1489 istart = index[startrev]
1487 start = int(istart[0] >> 16)
1490 start = int(istart[0] >> 16)
1488 if startrev == endrev:
1491 if startrev == endrev:
1489 end = start + istart[1]
1492 end = start + istart[1]
1490 else:
1493 else:
1491 iend = index[endrev]
1494 iend = index[endrev]
1492 end = int(iend[0] >> 16) + iend[1]
1495 end = int(iend[0] >> 16) + iend[1]
1493
1496
1494 if self._inline:
1497 if self._inline:
1495 start += (startrev + 1) * self._io.size
1498 start += (startrev + 1) * self._io.size
1496 end += (endrev + 1) * self._io.size
1499 end += (endrev + 1) * self._io.size
1497 length = end - start
1500 length = end - start
1498
1501
1499 return start, self._getsegment(start, length, df=df)
1502 return start, self._getsegment(start, length, df=df)
1500
1503
1501 def _chunk(self, rev, df=None):
1504 def _chunk(self, rev, df=None):
1502 """Obtain a single decompressed chunk for a revision.
1505 """Obtain a single decompressed chunk for a revision.
1503
1506
1504 Accepts an integer revision and an optional already-open file handle
1507 Accepts an integer revision and an optional already-open file handle
1505 to be used for reading. If used, the seek position of the file will not
1508 to be used for reading. If used, the seek position of the file will not
1506 be preserved.
1509 be preserved.
1507
1510
1508 Returns a str holding uncompressed data for the requested revision.
1511 Returns a str holding uncompressed data for the requested revision.
1509 """
1512 """
1510 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1513 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1511
1514
    def _chunks(self, revs, df=None, targetsize=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        # Bind frequently used attributes/methods to locals: this is a hot
        # path and repeated attribute lookups inside the loops below are
        # comparatively expensive.
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        # With sparse reads enabled, the requested revisions are split into
        # density-friendly slices so we do not read large unused gaps.
        if not self._withsparseread:
            slicedchunks = (revs,)
        else:
            slicedchunks = deltautil.slicechunk(self, revs,
                                               targetsize=targetsize)

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            for rev in revschunk:
                # Each revision's compressed data is located inside the single
                # segment read above; ``offset`` rebases absolute positions
                # into the buffer. Inline revlogs interleave index entries
                # with data, hence the per-revision iosize correction.
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

        return l
1565
1568
1566 def _chunkclear(self):
1569 def _chunkclear(self):
1567 """Clear the raw chunk cache."""
1570 """Clear the raw chunk cache."""
1568 self._chunkcache = (0, '')
1571 self._chunkcache = (0, '')
1569
1572
1570 def deltaparent(self, rev):
1573 def deltaparent(self, rev):
1571 """return deltaparent of the given revision"""
1574 """return deltaparent of the given revision"""
1572 base = self.index[rev][3]
1575 base = self.index[rev][3]
1573 if base == rev:
1576 if base == rev:
1574 return nullrev
1577 return nullrev
1575 elif self._generaldelta:
1578 elif self._generaldelta:
1576 return base
1579 return base
1577 else:
1580 else:
1578 return rev - 1
1581 return rev - 1
1579
1582
    def issnapshot(self, rev):
        """tells whether rev is a snapshot
        """
        # Without sparse-revlog, only full texts (delta parent == nullrev)
        # are snapshots.
        if not self._sparserevlog:
            return self.deltaparent(rev) == nullrev
        elif util.safehasattr(self.index, 'issnapshot'):
            # directly assign the method to cache the testing and access
            # (subsequent calls go straight to the C index implementation).
            self.issnapshot = self.index.issnapshot
            return self.issnapshot(rev)
        if rev == nullrev:
            return True
        entry = self.index[rev]
        # entry[3] is the delta base (cf. deltaparent); entry[5]/entry[6]
        # are the two parent revisions.
        base = entry[3]
        if base == rev:
            # Stored as a full text.
            return True
        if base == nullrev:
            return True
        p1 = entry[5]
        p2 = entry[6]
        if base == p1 or base == p2:
            # Delta against a parent: a regular delta, not a snapshot.
            return False
        # Delta against a non-parent: this is an intermediate snapshot only
        # if its base chain is itself made of snapshots.
        return self.issnapshot(base)
1602
1605
1603 def snapshotdepth(self, rev):
1606 def snapshotdepth(self, rev):
1604 """number of snapshot in the chain before this one"""
1607 """number of snapshot in the chain before this one"""
1605 if not self.issnapshot(rev):
1608 if not self.issnapshot(rev):
1606 raise error.ProgrammingError('revision %d not a snapshot')
1609 raise error.ProgrammingError('revision %d not a snapshot')
1607 return len(self._deltachain(rev)[0]) - 1
1610 return len(self._deltachain(rev)[0]) - 1
1608
1611
1609 def revdiff(self, rev1, rev2):
1612 def revdiff(self, rev1, rev2):
1610 """return or calculate a delta between two revisions
1613 """return or calculate a delta between two revisions
1611
1614
1612 The delta calculated is in binary form and is intended to be written to
1615 The delta calculated is in binary form and is intended to be written to
1613 revlog data directly. So this function needs raw revision data.
1616 revlog data directly. So this function needs raw revision data.
1614 """
1617 """
1615 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1618 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1616 return bytes(self._chunk(rev2))
1619 return bytes(self._chunk(rev2))
1617
1620
1618 return mdiff.textdiff(self.revision(rev1, raw=True),
1621 return mdiff.textdiff(self.revision(rev1, raw=True),
1619 self.revision(rev2, raw=True))
1622 self.revision(rev2, raw=True))
1620
1623
    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        raw - an optional argument specifying if the revision data is to be
        treated as raw data when applying flag transforms. 'raw' should be set
        to True when generating changegroups or in debug commands.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        cachedrev = None
        flags = None
        rawtext = None
        # The null revision is always empty.
        if node == nullid:
            return ""
        if self._revisioncache:
            if self._revisioncache[0] == node:
                # _cache only stores rawtext
                if raw:
                    return self._revisioncache[2]
                # duplicated, but good for perf
                if rev is None:
                    rev = self.rev(node)
                if flags is None:
                    flags = self.flags(rev)
                # no extra flags set, no flag processor runs, text = rawtext
                if flags == REVIDX_DEFAULT_FLAGS:
                    return self._revisioncache[2]
                # rawtext is reusable. need to run flag processor
                rawtext = self._revisioncache[2]

            # Even on a cache miss, the cached revision can serve as a
            # stop point for the delta chain walk below.
            cachedrev = self._revisioncache[1]

        # look up what we need to read
        if rawtext is None:
            if rev is None:
                rev = self.rev(node)

            chain, stopped = self._deltachain(rev, stoprev=cachedrev)
            if stopped:
                # Chain walk stopped at the cached revision: use its raw
                # text as the base to apply the remaining deltas onto.
                rawtext = self._revisioncache[2]

            # drop cache to save memory
            self._revisioncache = None

            targetsize = None
            rawsize = self.index[rev][2]
            if 0 <= rawsize:
                # Heuristic read-ahead bound for sparse reads; a negative
                # rawsize means the size is unknown.
                targetsize = 4 * rawsize

            bins = self._chunks(chain, df=_df, targetsize=targetsize)
            if rawtext is None:
                # No cached base: the first chunk of the chain is the full
                # text, the rest are deltas.
                rawtext = bytes(bins[0])
                bins = bins[1:]

            rawtext = mdiff.patches(rawtext, bins)
            self._revisioncache = (node, rev, rawtext)

        if flags is None:
            if rev is None:
                rev = self.rev(node)
            flags = self.flags(rev)

        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)

        return text
1695
1698
1696 def hash(self, text, p1, p2):
1699 def hash(self, text, p1, p2):
1697 """Compute a node hash.
1700 """Compute a node hash.
1698
1701
1699 Available as a function so that subclasses can replace the hash
1702 Available as a function so that subclasses can replace the hash
1700 as needed.
1703 as needed.
1701 """
1704 """
1702 return storageutil.hashrevisionsha1(text, p1, p2)
1705 return storageutil.hashrevisionsha1(text, p1, p2)
1703
1706
    def _processflags(self, text, flags, operation, raw=False):
        """Inspect revision data flags and applies transforms defined by
        registered flag processors.

        ``text`` - the revision data to process
        ``flags`` - the revision flags
        ``operation`` - the operation being performed (read or write)
        ``raw`` - an optional argument describing if the raw transform should be
        applied.

        This method processes the flags in the order (or reverse order if
        ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
        flag processors registered for present flags. The order of flags defined
        in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.

        Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
        processed text and ``validatehash`` is a bool indicating whether the
        returned text should be checked for hash integrity.

        Note: If the ``raw`` argument is set, it has precedence over the
        operation and will only update the value of ``validatehash``.
        """
        # fast path: no flag processors will run
        if flags == 0:
            return text, True
        if not operation in ('read', 'write'):
            raise error.ProgrammingError(_("invalid '%s' operation") %
                                         operation)
        # Check all flags are known.
        if flags & ~REVIDX_KNOWN_FLAGS:
            raise error.RevlogError(_("incompatible revision flag '%#x'") %
                                    (flags & ~REVIDX_KNOWN_FLAGS))
        validatehash = True
        # Depending on the operation (read or write), the order might be
        # reversed due to non-commutative transforms.
        orderedflags = REVIDX_FLAGS_ORDER
        if operation == 'write':
            orderedflags = reversed(orderedflags)

        for flag in orderedflags:
            # If a flagprocessor has been registered for a known flag, apply the
            # related operation transform and update result tuple.
            if flag & flags:
                vhash = True

                if flag not in self._flagprocessors:
                    message = _("missing processor for flag '%#x'") % (flag)
                    raise error.RevlogError(message)

                # A None entry marks the flag as known but transform-free:
                # the text passes through unchanged.
                processor = self._flagprocessors[flag]
                if processor is not None:
                    readtransform, writetransform, rawtransform = processor

                    if raw:
                        # Raw mode: only the hash-validity verdict is
                        # computed; the text itself is left untouched.
                        vhash = rawtransform(self, text)
                    elif operation == 'read':
                        text, vhash = readtransform(self, text)
                    else: # write operation
                        text, vhash = writetransform(self, text)
                    validatehash = validatehash and vhash

        return text, validatehash
1766
1769
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.

        Raises RevlogError on mismatch, or CensoredNodeError when the text
        is recognizably censored content.
        """
        try:
            if p1 is None and p2 is None:
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                # Clear the revision cache on hash failure. The revision cache
                # only stores the raw revision and clearing the cache does have
                # the side-effect that we won't have a cache hit when the raw
                # revision data is accessed. But this case should be rare and
                # it is extra work to teach the cache about the hash
                # verification state.
                if self._revisioncache and self._revisioncache[0] == node:
                    self._revisioncache = None

                revornode = rev
                if revornode is None:
                    revornode = templatefilters.short(hex(node))
                raise error.RevlogError(_("integrity check failed on %s:%s")
                    % (self.indexfile, pycompat.bytestr(revornode)))
        except error.RevlogError:
            # A hash mismatch on a censorable revlog may just mean the
            # content was censored: report that as a distinct error.
            if self._censorable and storageutil.iscensoredtext(text):
                raise error.CensoredNodeError(self.indexfile, node, text)
            raise
1795
1798
    def _enforceinlinesize(self, tr, fp=None):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        # Nothing to do when already split, or still under the inline limit.
        if (not self._inline or
            (self.start(tiprev) + self.length(tiprev)) < _maxinline):
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise error.RevlogError(_("%s not found in the transaction")
                                    % self.indexfile)

        trindex = trinfo[2]
        if trindex is not None:
            dataoff = self.start(trindex)
        else:
            # revlog was stripped at start of transaction, use all leftover data
            trindex = len(self) - 1
            dataoff = self.end(tiprev)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()
            # We can't use the cached file handle after close(). So prevent
            # its usage.
            self._writinghandles = None

        # Copy every revision's data segment out of the inline file into a
        # standalone data file.
        with self._indexfp('r') as ifh, self._datafp('w') as dfh:
            for r in self:
                dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])

        # Rewrite the index without the inline flag set.
        with self._indexfp('w') as fp:
            self.version &= ~FLAG_INLINE_DATA
            self._inline = False
            io = self._io
            for i in self:
                e = io.packentry(self.index[i], self.node, self.version, i)
                fp.write(e)

            # the temp file replace the real index when we exit the context
            # manager

        tr.replace(self.indexfile, trindex * self._io.size)
        # Cached chunk offsets referred to the inline layout; invalidate.
        self._chunkclear()
1847
1850
1848 def _nodeduplicatecallback(self, transaction, node):
1851 def _nodeduplicatecallback(self, transaction, node):
1849 """called when trying to add a node already stored.
1852 """called when trying to add a node already stored.
1850 """
1853 """
1851
1854
    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
                    node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
            computed by default as hash(text, p1, p2), however subclasses might
            use different hashing method (and override checkhash() in such case)
        flags - the known flags to set on the revision
        deltacomputer - an optional deltacomputer instance shared between
            multiple calls

        Returns the node of the added (or pre-existing) revision.
        """
        if link == nullrev:
            raise error.RevlogError(_("attempted to add linkrev -1 to %s")
                                    % self.indexfile)

        if flags:
            # When flags are set, compute the node from the *unprocessed*
            # text now, before flag processors may transform it below.
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = self._processflags(text, flags, 'write')

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise error.RevlogError(
                _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
                % (self.indexfile, len(rawtext)))

        # Flagless path: the node is the hash of the (unmodified) raw text.
        node = node or self.hash(rawtext, p1, p2)
        if node in self.nodemap:
            # Already stored; nothing to add.
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
                                   flags, cachedelta=cachedelta,
                                   deltacomputer=deltacomputer)
1897
1900
1898 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
1901 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
1899 cachedelta=None, deltacomputer=None):
1902 cachedelta=None, deltacomputer=None):
1900 """add a raw revision with known flags, node and parents
1903 """add a raw revision with known flags, node and parents
1901 useful when reusing a revision not stored in this revlog (ex: received
1904 useful when reusing a revision not stored in this revlog (ex: received
1902 over wire, or read from an external bundle).
1905 over wire, or read from an external bundle).
1903 """
1906 """
1904 dfh = None
1907 dfh = None
1905 if not self._inline:
1908 if not self._inline:
1906 dfh = self._datafp("a+")
1909 dfh = self._datafp("a+")
1907 ifh = self._indexfp("a+")
1910 ifh = self._indexfp("a+")
1908 try:
1911 try:
1909 return self._addrevision(node, rawtext, transaction, link, p1, p2,
1912 return self._addrevision(node, rawtext, transaction, link, p1, p2,
1910 flags, cachedelta, ifh, dfh,
1913 flags, cachedelta, ifh, dfh,
1911 deltacomputer=deltacomputer)
1914 deltacomputer=deltacomputer)
1912 finally:
1915 finally:
1913 if dfh:
1916 if dfh:
1914 dfh.close()
1917 dfh.close()
1915 ifh.close()
1918 ifh.close()
1916
1919
1917 def compress(self, data):
1920 def compress(self, data):
1918 """Generate a possibly-compressed representation of data."""
1921 """Generate a possibly-compressed representation of data."""
1919 if not data:
1922 if not data:
1920 return '', data
1923 return '', data
1921
1924
1922 compressed = self._compressor.compress(data)
1925 compressed = self._compressor.compress(data)
1923
1926
1924 if compressed:
1927 if compressed:
1925 # The revlog compressor added the header in the returned data.
1928 # The revlog compressor added the header in the returned data.
1926 return '', compressed
1929 return '', compressed
1927
1930
1928 if data[0:1] == '\0':
1931 if data[0:1] == '\0':
1929 return '', data
1932 return '', data
1930 return 'u', data
1933 return 'u', data
1931
1934
    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely by at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]  # one-byte format header

        if t == 'x':
            # zlib-compressed chunk.
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        stringutil.forcebytestr(e))
        # '\0' is more common than 'u' so it goes first.
        elif t == '\0':
            return data
        elif t == 'u':
            # Explicitly-uncompressed chunk: strip the marker byte.
            return util.buffer(data, 1)

        # Unknown header: fall back to the registered compression engines,
        # caching the resolved decompressor per header byte.
        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor()
                self._decompressors[t] = compressor
            except KeyError:
                raise error.RevlogError(_('unknown compression type %r') % t)

        return compressor.decompress(data)
def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
                 cachedelta, ifh, dfh, alwayscache=False,
                 deltacomputer=None):
    """internal function to add revisions to the log

    see addrevision for argument descriptions.

    note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

    if "deltacomputer" is not provided or None, a defaultdeltacomputer will
    be used.

    invariants:
    - rawtext is optional (can be None); if not set, cachedelta must be set.
      if both are set, they must correspond to each other.

    Returns the node that was added.
    """
    # the null and working-directory nodes are sentinels and may never be
    # stored as real revisions
    if node == nullid:
        raise error.RevlogError(_("%s: attempt to add null revision") %
                                self.indexfile)
    if node == wdirid or node in wdirfilenodeids:
        raise error.RevlogError(_("%s: attempt to add wdir revision") %
                                self.indexfile)

    # inline revlogs store revision data in the index file itself, so the
    # delta computer reads from the index handle in that case
    if self._inline:
        fh = ifh
    else:
        fh = dfh

    # single-element list so the delta computer can fill in the built text
    # as a side effect (read back below for caching)
    btext = [rawtext]

    curr = len(self)
    prev = curr - 1
    offset = self.end(prev)
    p1r, p2r = self.rev(p1), self.rev(p2)

    # full versions are inserted when the needed deltas
    # become comparable to the uncompressed text
    if rawtext is None:
        # need rawtext size, before changed by flag processors, which is
        # the non-raw size. use revlog explicitly to avoid filelog's extra
        # logic that might remove metadata size.
        textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
                                    cachedelta[1])
    else:
        textlen = len(rawtext)

    if deltacomputer is None:
        deltacomputer = deltautil.deltacomputer(self)

    revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)

    deltainfo = deltacomputer.finddeltainfo(revinfo, fh)

    # build and record the new index entry before writing any bytes
    e = (offset_type(offset, flags), deltainfo.deltalen, textlen,
         deltainfo.base, link, p1r, p2r, node)
    self.index.append(e)
    self.nodemap[node] = curr

    # Reset the pure node cache start lookup offset to account for new
    # revision.
    if self._nodepos is not None:
        self._nodepos = curr

    entry = self._io.packentry(e, self.node, self.version, curr)
    self._writeentry(transaction, ifh, dfh, entry, deltainfo.data,
                     link, offset)

    # the delta computer may have materialized the full text into btext
    rawtext = btext[0]

    if alwayscache and rawtext is None:
        rawtext = deltacomputer.buildtext(revinfo, fh)

    if type(rawtext) == bytes: # only accept immutable objects
        self._revisioncache = (node, curr, rawtext)
    self._chainbasecache[curr] = deltainfo.chainbase
    return node
2064
2067
def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
    """Append one packed index ``entry`` and its revision ``data``.

    ``data`` is a 2-element sequence (the first element is only written
    when non-empty in the non-inline case). ``ifh`` is the open index
    file handle; ``dfh`` is the data file handle, or None/unused for
    inline revlogs. ``offset`` is the end offset of the previous
    revision, used to register the transaction journal entries.
    """
    # Files opened in a+ mode have inconsistent behavior on various
    # platforms. Windows requires that a file positioning call be made
    # when the file handle transitions between reads and writes. See
    # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
    # platforms, Python or the platform itself can be buggy. Some versions
    # of Solaris have been observed to not append at the end of the file
    # if the file was seeked to before the end. See issue4943 for more.
    #
    # We work around this issue by inserting a seek() before writing.
    # Note: This is likely not necessary on Python 3. However, because
    # the file handle is reused for reads and may be seeked there, we need
    # to be careful before changing this.
    ifh.seek(0, os.SEEK_END)
    if dfh:
        dfh.seek(0, os.SEEK_END)

    curr = len(self) - 1
    if not self._inline:
        transaction.add(self.datafile, offset)
        transaction.add(self.indexfile, curr * len(entry))
        if data[0]:
            dfh.write(data[0])
        dfh.write(data[1])
        ifh.write(entry)
    else:
        # inline: entry and revision data interleave in the index file
        offset += curr * self._io.size
        transaction.add(self.indexfile, offset, curr)
        ifh.write(entry)
        ifh.write(data[0])
        ifh.write(data[1])
        # may migrate the revlog out of inline mode once it grows too big
        self._enforceinlinesize(transaction, ifh)
2097
2100
def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
    """
    add a delta group

    given a set of deltas, add them to the revision log. the
    first delta is against its parent, which should be in our
    log, the rest are against the previous delta.

    If ``addrevisioncb`` is defined, it will be called with arguments of
    this revlog and the node that was added.

    Returns the list of nodes that appeared in ``deltas`` (including
    nodes that were already present and therefore skipped).
    """

    if self._writinghandles:
        raise error.ProgrammingError('cannot nest addgroup() calls')

    nodes = []

    r = len(self)
    end = 0
    if r:
        end = self.end(r - 1)
    ifh = self._indexfp("a+")
    isize = r * self._io.size
    # register journal entries so the transaction can roll the files back
    if self._inline:
        transaction.add(self.indexfile, end + isize, r)
        dfh = None
    else:
        transaction.add(self.indexfile, isize, r)
        transaction.add(self.datafile, end)
        dfh = self._datafp("a+")
    def flush():
        # flush pending writes so _peek_iscensored can read back data
        if dfh:
            dfh.flush()
        ifh.flush()

    self._writinghandles = (ifh, dfh)

    try:
        deltacomputer = deltautil.deltacomputer(self)
        # loop through our set of deltas
        for data in deltas:
            node, p1, p2, linknode, deltabase, delta, flags = data
            link = linkmapper(linknode)
            flags = flags or REVIDX_DEFAULT_FLAGS

            nodes.append(node)

            if node in self.nodemap:
                self._nodeduplicatecallback(transaction, node)
                # this can happen if two branches make the same change
                continue

            for p in (p1, p2):
                if p not in self.nodemap:
                    raise error.LookupError(p, self.indexfile,
                                            _('unknown parent'))

            if deltabase not in self.nodemap:
                raise error.LookupError(deltabase, self.indexfile,
                                        _('unknown delta base'))

            baserev = self.rev(deltabase)

            if baserev != nullrev and self.iscensored(baserev):
                # if base is censored, delta must be full replacement in a
                # single patch operation
                hlen = struct.calcsize(">lll")
                oldlen = self.rawsize(baserev)
                newlen = len(delta) - hlen
                if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                    raise error.CensoredBaseError(self.indexfile,
                                                  self.node(baserev))

            if not flags and self._peek_iscensored(baserev, delta, flush):
                flags |= REVIDX_ISCENSORED

            # We assume consumers of addrevisioncb will want to retrieve
            # the added revision, which will require a call to
            # revision(). revision() will fast path if there is a cache
            # hit. So, we tell _addrevision() to always cache in this case.
            # We're only using addgroup() in the context of changegroup
            # generation so the revision data can always be handled as raw
            # by the flagprocessor.
            self._addrevision(node, None, transaction, link,
                              p1, p2, flags, (baserev, delta),
                              ifh, dfh,
                              alwayscache=bool(addrevisioncb),
                              deltacomputer=deltacomputer)

            if addrevisioncb:
                addrevisioncb(self, node)

            if not dfh and not self._inline:
                # addrevision switched from inline to conventional
                # reopen the index
                ifh.close()
                dfh = self._datafp("a+")
                ifh = self._indexfp("a+")
                self._writinghandles = (ifh, dfh)
    finally:
        self._writinghandles = None

        if dfh:
            dfh.close()
        ifh.close()

    return nodes
2205
2208
def iscensored(self, rev):
    """Return whether revision ``rev`` carries the censored flag.

    Always false for revlogs that were not created as censorable.
    """
    if self._censorable:
        return self.flags(rev) & REVIDX_ISCENSORED
    return False
2212
2215
2213 def _peek_iscensored(self, baserev, delta, flush):
2216 def _peek_iscensored(self, baserev, delta, flush):
2214 """Quickly check if a delta produces a censored revision."""
2217 """Quickly check if a delta produces a censored revision."""
2215 if not self._censorable:
2218 if not self._censorable:
2216 return False
2219 return False
2217
2220
2218 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2221 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2219
2222
def getstrippoint(self, minlink):
    """find the minimum rev that must be stripped to strip the linkrev

    Returns a tuple containing the minimum rev and a set of all revs that
    have linkrevs that will be broken by this strip.
    """
    tiprev = len(self) - 1
    return storageutil.resolvestripinfo(minlink, tiprev,
                                        self.headrevs(),
                                        self.linkrev, self.parentrevs)
2229
2232
def strip(self, minlink, transaction):
    """truncate the revlog on the first revision with a linkrev >= minlink

    This function is called when we're stripping revision minlink and
    its descendants from the repository.

    We have to remove all revisions with linkrev >= minlink, because
    the equivalent changelog revisions will be renumbered after the
    strip.

    So we truncate the revlog on the first of these revisions, and
    trust that the caller has saved the revisions that shouldn't be
    removed and that it'll re-add them after this truncation.
    """
    # nothing stored, nothing to strip
    if len(self) == 0:
        return

    rev, _ = self.getstrippoint(minlink)
    if rev == len(self):
        # strip point is past the tip: no revision affected
        return

    # first truncate the files on disk
    end = self.start(rev)
    if not self._inline:
        transaction.add(self.datafile, end)
        end = rev * self._io.size
    else:
        # inline: index and data interleave, so add data size to the
        # index-entry offset
        end += rev * self._io.size

    transaction.add(self.indexfile, end)

    # then reset internal state in memory to forget those revisions
    self._revisioncache = None
    self._chaininfocache = {}
    self._chunkclear()
    for x in pycompat.xrange(rev, len(self)):
        del self.nodemap[self.node(x)]

    del self.index[rev:-1]
    self._nodepos = None
2270
2273
def checksize(self):
    """Compare recorded revision sizes against the on-disk file sizes.

    Returns a ``(dd, di)`` pair: ``dd`` is the byte difference between
    the actual data file size and the size implied by the index, and
    ``di`` the analogous difference for the index file. 0 means
    consistent; a missing file (ENOENT) contributes 0.
    """
    # expected end offset of the data according to the last index entry
    expected = 0
    if len(self):
        expected = max(0, self.end(len(self) - 1))

    try:
        with self._datafp() as f:
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        dd = 0

    try:
        f = self.opener(self.indexfile)
        f.seek(0, 2)
        actual = f.tell()
        f.close()
        s = self._io.size
        i = max(0, actual // s)
        # leftover bytes that don't form a whole index entry
        di = actual - (i * s)
        if self._inline:
            # inline revlogs interleave entries and revision data in the
            # index file, so recompute the expectation from chunk lengths
            databytes = 0
            for r in self:
                databytes += max(0, self.length(r))
            dd = 0
            di = actual - len(self) * s - databytes
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        di = 0

    return (dd, di)
2306
2309
def files(self):
    """Return the storage file names backing this revlog: the index
    file, plus the data file when revision data lives separately."""
    if self._inline:
        return [self.indexfile]
    return [self.indexfile, self.datafile]
2312
2315
def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
                  assumehaveparentrevisions=False,
                  deltamode=repository.CG_DELTAMODE_STD):
    """Emit revision delta objects for ``nodes`` by delegating to
    ``storageutil.emitrevisions`` with this revlog's accessors."""
    valid_orders = ('nodes', 'storage', 'linear', None)
    if nodesorder not in valid_orders:
        raise error.ProgrammingError('unhandled value for nodesorder: %s' %
                                     nodesorder)

    # without generaldelta, storage order is the only sensible default
    if nodesorder is None and not self._generaldelta:
        nodesorder = 'storage'

    if (not self._storedeltachains
            and deltamode != repository.CG_DELTAMODE_PREV):
        deltamode = repository.CG_DELTAMODE_FULL

    return storageutil.emitrevisions(
        self, nodes, nodesorder, revlogrevisiondelta,
        deltaparentfn=self.deltaparent,
        candeltafn=self.candelta,
        rawsizefn=self.rawsize,
        revdifffn=self.revdiff,
        flagsfn=self.flags,
        deltamode=deltamode,
        revisiondata=revisiondata,
        assumehaveparentrevisions=assumehaveparentrevisions)
2337
2340
# Delta-reuse policies consumed by clone(); see its docstring for the
# full semantics of each value.
DELTAREUSEALWAYS = 'always'
DELTAREUSESAMEREVS = 'samerevs'
DELTAREUSENEVER = 'never'

# Re-add each revision through the full addrevision() path instead of
# the lower-level _addrevision().
DELTAREUSEFULLADD = 'fulladd'

# Set of all recognized ``deltareuse`` values, used for validation.
DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}
2345
2348
def clone(self, tr, destrevlog, addrevisioncb=None,
          deltareuse=DELTAREUSESAMEREVS, forcedeltabothparents=None):
    """Copy this revlog to another, possibly with format changes.

    The destination revlog will contain the same revisions and nodes.
    However, it may not be bit-for-bit identical due to e.g. delta encoding
    differences.

    The ``deltareuse`` argument control how deltas from the existing revlog
    are preserved in the destination revlog. The argument can have the
    following values:

    DELTAREUSEALWAYS
      Deltas will always be reused (if possible), even if the destination
      revlog would not select the same revisions for the delta. This is the
      fastest mode of operation.
    DELTAREUSESAMEREVS
      Deltas will be reused if the destination revlog would pick the same
      revisions for the delta. This mode strikes a balance between speed
      and optimization.
    DELTAREUSENEVER
      Deltas will never be reused. This is the slowest mode of execution.
      This mode can be used to recompute deltas (e.g. if the diff/delta
      algorithm changes).

    Delta computation can be slow, so the choice of delta reuse policy can
    significantly affect run time.

    The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
    two extremes. Deltas will be reused if they are appropriate. But if the
    delta could choose a better revision, it will do so. This means if you
    are converting a non-generaldelta revlog to a generaldelta revlog,
    deltas will be recomputed if the delta's parent isn't a parent of the
    revision.

    In addition to the delta policy, the ``forcedeltabothparents``
    argument controls whether to force compute deltas against both parents
    for merges. By default, the current default is used.
    """
    if deltareuse not in self.DELTAREUSEALL:
        raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)

    if len(destrevlog):
        raise ValueError(_('destination revlog is not empty'))

    if getattr(self, 'filteredrevs', None):
        raise ValueError(_('source revlog has filtered revisions'))
    if getattr(destrevlog, 'filteredrevs', None):
        raise ValueError(_('destination revlog has filtered revisions'))

    # lazydeltabase controls whether to reuse a cached delta, if possible.
    # remember the destination's settings so they can be restored below
    oldlazydeltabase = destrevlog._lazydeltabase
    oldamd = destrevlog._deltabothparents

    try:
        if deltareuse == self.DELTAREUSEALWAYS:
            destrevlog._lazydeltabase = True
        elif deltareuse == self.DELTAREUSESAMEREVS:
            destrevlog._lazydeltabase = False

        destrevlog._deltabothparents = forcedeltabothparents or oldamd

        populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
                                            self.DELTAREUSESAMEREVS)

        deltacomputer = deltautil.deltacomputer(destrevlog)
        index = self.index
        for rev in self:
            entry = index[rev]

            # Some classes override linkrev to take filtered revs into
            # account. Use raw entry from index.
            flags = entry[0] & 0xffff
            linkrev = entry[4]
            p1 = index[entry[5]][7]
            p2 = index[entry[6]][7]
            node = entry[7]

            # (Possibly) reuse the delta from the revlog if allowed and
            # the revlog chunk is a delta.
            cachedelta = None
            rawtext = None
            if populatecachedelta:
                dp = self.deltaparent(rev)
                if dp != nullrev:
                    cachedelta = (dp, bytes(self._chunk(rev)))

            if not cachedelta:
                # no reusable delta: fall back to the full raw text
                rawtext = self.revision(rev, raw=True)


            if deltareuse == self.DELTAREUSEFULLADD:
                destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
                                       cachedelta=cachedelta,
                                       node=node, flags=flags,
                                       deltacomputer=deltacomputer)
            else:
                ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
                                        checkambig=False)
                dfh = None
                if not destrevlog._inline:
                    dfh = destrevlog.opener(destrevlog.datafile, 'a+')
                try:
                    destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
                                            p2, flags, cachedelta, ifh, dfh,
                                            deltacomputer=deltacomputer)
                finally:
                    if dfh:
                        dfh.close()
                    ifh.close()

            if addrevisioncb:
                addrevisioncb(self, rev, node)
    finally:
        # always restore the destination's original delta settings
        destrevlog._lazydeltabase = oldlazydeltabase
        destrevlog._deltabothparents = oldamd
2462
2465
2463 def censorrevision(self, tr, censornode, tombstone=b''):
2466 def censorrevision(self, tr, censornode, tombstone=b''):
2464 if (self.version & 0xFFFF) == REVLOGV0:
2467 if (self.version & 0xFFFF) == REVLOGV0:
2465 raise error.RevlogError(_('cannot censor with version %d revlogs') %
2468 raise error.RevlogError(_('cannot censor with version %d revlogs') %
2466 self.version)
2469 self.version)
2467
2470
2468 censorrev = self.rev(censornode)
2471 censorrev = self.rev(censornode)
2469 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2472 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2470
2473
2471 if len(tombstone) > self.rawsize(censorrev):
2474 if len(tombstone) > self.rawsize(censorrev):
2472 raise error.Abort(_('censor tombstone must be no longer than '
2475 raise error.Abort(_('censor tombstone must be no longer than '
2473 'censored data'))
2476 'censored data'))
2474
2477
2475 # Rewriting the revlog in place is hard. Our strategy for censoring is
2478 # Rewriting the revlog in place is hard. Our strategy for censoring is
2476 # to create a new revlog, copy all revisions to it, then replace the
2479 # to create a new revlog, copy all revisions to it, then replace the
2477 # revlogs on transaction close.
2480 # revlogs on transaction close.
2478
2481
2479 newindexfile = self.indexfile + b'.tmpcensored'
2482 newindexfile = self.indexfile + b'.tmpcensored'
2480 newdatafile = self.datafile + b'.tmpcensored'
2483 newdatafile = self.datafile + b'.tmpcensored'
2481
2484
2482 # This is a bit dangerous. We could easily have a mismatch of state.
2485 # This is a bit dangerous. We could easily have a mismatch of state.
2483 newrl = revlog(self.opener, newindexfile, newdatafile,
2486 newrl = revlog(self.opener, newindexfile, newdatafile,
2484 censorable=True)
2487 censorable=True)
2485 newrl.version = self.version
2488 newrl.version = self.version
2486 newrl._generaldelta = self._generaldelta
2489 newrl._generaldelta = self._generaldelta
2487 newrl._io = self._io
2490 newrl._io = self._io
2488
2491
2489 for rev in self.revs():
2492 for rev in self.revs():
2490 node = self.node(rev)
2493 node = self.node(rev)
2491 p1, p2 = self.parents(node)
2494 p1, p2 = self.parents(node)
2492
2495
2493 if rev == censorrev:
2496 if rev == censorrev:
2494 newrl.addrawrevision(tombstone, tr, self.linkrev(censorrev),
2497 newrl.addrawrevision(tombstone, tr, self.linkrev(censorrev),
2495 p1, p2, censornode, REVIDX_ISCENSORED)
2498 p1, p2, censornode, REVIDX_ISCENSORED)
2496
2499
2497 if newrl.deltaparent(rev) != nullrev:
2500 if newrl.deltaparent(rev) != nullrev:
2498 raise error.Abort(_('censored revision stored as delta; '
2501 raise error.Abort(_('censored revision stored as delta; '
2499 'cannot censor'),
2502 'cannot censor'),
2500 hint=_('censoring of revlogs is not '
2503 hint=_('censoring of revlogs is not '
2501 'fully implemented; please report '
2504 'fully implemented; please report '
2502 'this bug'))
2505 'this bug'))
2503 continue
2506 continue
2504
2507
2505 if self.iscensored(rev):
2508 if self.iscensored(rev):
2506 if self.deltaparent(rev) != nullrev:
2509 if self.deltaparent(rev) != nullrev:
2507 raise error.Abort(_('cannot censor due to censored '
2510 raise error.Abort(_('cannot censor due to censored '
2508 'revision having delta stored'))
2511 'revision having delta stored'))
2509 rawtext = self._chunk(rev)
2512 rawtext = self._chunk(rev)
2510 else:
2513 else:
2511 rawtext = self.revision(rev, raw=True)
2514 rawtext = self.revision(rev, raw=True)
2512
2515
2513 newrl.addrawrevision(rawtext, tr, self.linkrev(rev), p1, p2, node,
2516 newrl.addrawrevision(rawtext, tr, self.linkrev(rev), p1, p2, node,
2514 self.flags(rev))
2517 self.flags(rev))
2515
2518
2516 tr.addbackup(self.indexfile, location='store')
2519 tr.addbackup(self.indexfile, location='store')
2517 if not self._inline:
2520 if not self._inline:
2518 tr.addbackup(self.datafile, location='store')
2521 tr.addbackup(self.datafile, location='store')
2519
2522
2520 self.opener.rename(newrl.indexfile, self.indexfile)
2523 self.opener.rename(newrl.indexfile, self.indexfile)
2521 if not self._inline:
2524 if not self._inline:
2522 self.opener.rename(newrl.datafile, self.datafile)
2525 self.opener.rename(newrl.datafile, self.datafile)
2523
2526
2524 self.clearcaches()
2527 self.clearcaches()
2525 self._loadindex()
2528 self._loadindex()
2526
2529
2527 def verifyintegrity(self, state):
2530 def verifyintegrity(self, state):
2528 """Verifies the integrity of the revlog.
2531 """Verifies the integrity of the revlog.
2529
2532
2530 Yields ``revlogproblem`` instances describing problems that are
2533 Yields ``revlogproblem`` instances describing problems that are
2531 found.
2534 found.
2532 """
2535 """
2533 dd, di = self.checksize()
2536 dd, di = self.checksize()
2534 if dd:
2537 if dd:
2535 yield revlogproblem(error=_('data length off by %d bytes') % dd)
2538 yield revlogproblem(error=_('data length off by %d bytes') % dd)
2536 if di:
2539 if di:
2537 yield revlogproblem(error=_('index contains %d extra bytes') % di)
2540 yield revlogproblem(error=_('index contains %d extra bytes') % di)
2538
2541
2539 version = self.version & 0xFFFF
2542 version = self.version & 0xFFFF
2540
2543
2541 # The verifier tells us what version revlog we should be.
2544 # The verifier tells us what version revlog we should be.
2542 if version != state['expectedversion']:
2545 if version != state['expectedversion']:
2543 yield revlogproblem(
2546 yield revlogproblem(
2544 warning=_("warning: '%s' uses revlog format %d; expected %d") %
2547 warning=_("warning: '%s' uses revlog format %d; expected %d") %
2545 (self.indexfile, version, state['expectedversion']))
2548 (self.indexfile, version, state['expectedversion']))
2546
2549
2547 state['skipread'] = set()
2550 state['skipread'] = set()
2548
2551
2549 for rev in self:
2552 for rev in self:
2550 node = self.node(rev)
2553 node = self.node(rev)
2551
2554
2552 # Verify contents. 4 cases to care about:
2555 # Verify contents. 4 cases to care about:
2553 #
2556 #
2554 # common: the most common case
2557 # common: the most common case
2555 # rename: with a rename
2558 # rename: with a rename
2556 # meta: file content starts with b'\1\n', the metadata
2559 # meta: file content starts with b'\1\n', the metadata
2557 # header defined in filelog.py, but without a rename
2560 # header defined in filelog.py, but without a rename
2558 # ext: content stored externally
2561 # ext: content stored externally
2559 #
2562 #
2560 # More formally, their differences are shown below:
2563 # More formally, their differences are shown below:
2561 #
2564 #
2562 # | common | rename | meta | ext
2565 # | common | rename | meta | ext
2563 # -------------------------------------------------------
2566 # -------------------------------------------------------
2564 # flags() | 0 | 0 | 0 | not 0
2567 # flags() | 0 | 0 | 0 | not 0
2565 # renamed() | False | True | False | ?
2568 # renamed() | False | True | False | ?
2566 # rawtext[0:2]=='\1\n'| False | True | True | ?
2569 # rawtext[0:2]=='\1\n'| False | True | True | ?
2567 #
2570 #
2568 # "rawtext" means the raw text stored in revlog data, which
2571 # "rawtext" means the raw text stored in revlog data, which
2569 # could be retrieved by "revision(rev, raw=True)". "text"
2572 # could be retrieved by "revision(rev, raw=True)". "text"
2570 # mentioned below is "revision(rev, raw=False)".
2573 # mentioned below is "revision(rev, raw=False)".
2571 #
2574 #
2572 # There are 3 different lengths stored physically:
2575 # There are 3 different lengths stored physically:
2573 # 1. L1: rawsize, stored in revlog index
2576 # 1. L1: rawsize, stored in revlog index
2574 # 2. L2: len(rawtext), stored in revlog data
2577 # 2. L2: len(rawtext), stored in revlog data
2575 # 3. L3: len(text), stored in revlog data if flags==0, or
2578 # 3. L3: len(text), stored in revlog data if flags==0, or
2576 # possibly somewhere else if flags!=0
2579 # possibly somewhere else if flags!=0
2577 #
2580 #
2578 # L1 should be equal to L2. L3 could be different from them.
2581 # L1 should be equal to L2. L3 could be different from them.
2579 # "text" may or may not affect commit hash depending on flag
2582 # "text" may or may not affect commit hash depending on flag
2580 # processors (see revlog.addflagprocessor).
2583 # processors (see revlog.addflagprocessor).
2581 #
2584 #
2582 # | common | rename | meta | ext
2585 # | common | rename | meta | ext
2583 # -------------------------------------------------
2586 # -------------------------------------------------
2584 # rawsize() | L1 | L1 | L1 | L1
2587 # rawsize() | L1 | L1 | L1 | L1
2585 # size() | L1 | L2-LM | L1(*) | L1 (?)
2588 # size() | L1 | L2-LM | L1(*) | L1 (?)
2586 # len(rawtext) | L2 | L2 | L2 | L2
2589 # len(rawtext) | L2 | L2 | L2 | L2
2587 # len(text) | L2 | L2 | L2 | L3
2590 # len(text) | L2 | L2 | L2 | L3
2588 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
2591 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
2589 #
2592 #
2590 # LM: length of metadata, depending on rawtext
2593 # LM: length of metadata, depending on rawtext
2591 # (*): not ideal, see comment in filelog.size
2594 # (*): not ideal, see comment in filelog.size
2592 # (?): could be "- len(meta)" if the resolved content has
2595 # (?): could be "- len(meta)" if the resolved content has
2593 # rename metadata
2596 # rename metadata
2594 #
2597 #
2595 # Checks needed to be done:
2598 # Checks needed to be done:
2596 # 1. length check: L1 == L2, in all cases.
2599 # 1. length check: L1 == L2, in all cases.
2597 # 2. hash check: depending on flag processor, we may need to
2600 # 2. hash check: depending on flag processor, we may need to
2598 # use either "text" (external), or "rawtext" (in revlog).
2601 # use either "text" (external), or "rawtext" (in revlog).
2599
2602
2600 try:
2603 try:
2601 skipflags = state.get('skipflags', 0)
2604 skipflags = state.get('skipflags', 0)
2602 if skipflags:
2605 if skipflags:
2603 skipflags &= self.flags(rev)
2606 skipflags &= self.flags(rev)
2604
2607
2605 if skipflags:
2608 if skipflags:
2606 state['skipread'].add(node)
2609 state['skipread'].add(node)
2607 else:
2610 else:
2608 # Side-effect: read content and verify hash.
2611 # Side-effect: read content and verify hash.
2609 self.revision(node)
2612 self.revision(node)
2610
2613
2611 l1 = self.rawsize(rev)
2614 l1 = self.rawsize(rev)
2612 l2 = len(self.revision(node, raw=True))
2615 l2 = len(self.revision(node, raw=True))
2613
2616
2614 if l1 != l2:
2617 if l1 != l2:
2615 yield revlogproblem(
2618 yield revlogproblem(
2616 error=_('unpacked size is %d, %d expected') % (l2, l1),
2619 error=_('unpacked size is %d, %d expected') % (l2, l1),
2617 node=node)
2620 node=node)
2618
2621
2619 except error.CensoredNodeError:
2622 except error.CensoredNodeError:
2620 if state['erroroncensored']:
2623 if state['erroroncensored']:
2621 yield revlogproblem(error=_('censored file data'),
2624 yield revlogproblem(error=_('censored file data'),
2622 node=node)
2625 node=node)
2623 state['skipread'].add(node)
2626 state['skipread'].add(node)
2624 except Exception as e:
2627 except Exception as e:
2625 yield revlogproblem(
2628 yield revlogproblem(
2626 error=_('unpacking %s: %s') % (short(node),
2629 error=_('unpacking %s: %s') % (short(node),
2627 stringutil.forcebytestr(e)),
2630 stringutil.forcebytestr(e)),
2628 node=node)
2631 node=node)
2629 state['skipread'].add(node)
2632 state['skipread'].add(node)
2630
2633
2631 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
2634 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
2632 revisionscount=False, trackedsize=False,
2635 revisionscount=False, trackedsize=False,
2633 storedsize=False):
2636 storedsize=False):
2634 d = {}
2637 d = {}
2635
2638
2636 if exclusivefiles:
2639 if exclusivefiles:
2637 d['exclusivefiles'] = [(self.opener, self.indexfile)]
2640 d['exclusivefiles'] = [(self.opener, self.indexfile)]
2638 if not self._inline:
2641 if not self._inline:
2639 d['exclusivefiles'].append((self.opener, self.datafile))
2642 d['exclusivefiles'].append((self.opener, self.datafile))
2640
2643
2641 if sharedfiles:
2644 if sharedfiles:
2642 d['sharedfiles'] = []
2645 d['sharedfiles'] = []
2643
2646
2644 if revisionscount:
2647 if revisionscount:
2645 d['revisionscount'] = len(self)
2648 d['revisionscount'] = len(self)
2646
2649
2647 if trackedsize:
2650 if trackedsize:
2648 d['trackedsize'] = sum(map(self.rawsize, iter(self)))
2651 d['trackedsize'] = sum(map(self.rawsize, iter(self)))
2649
2652
2650 if storedsize:
2653 if storedsize:
2651 d['storedsize'] = sum(self.opener.stat(path).st_size
2654 d['storedsize'] = sum(self.opener.stat(path).st_size
2652 for path in self.files())
2655 for path in self.files())
2653
2656
2654 return d
2657 return d
@@ -1,1016 +1,1016 b''
1 # revlogdeltas.py - Logic around delta computation for revlog
1 # revlogdeltas.py - Logic around delta computation for revlog
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2018 Octobus <contact@octobus.net>
4 # Copyright 2018 Octobus <contact@octobus.net>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8 """Helper class to compute deltas stored inside revlogs"""
8 """Helper class to compute deltas stored inside revlogs"""
9
9
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import collections
12 import collections
13 import struct
13 import struct
14
14
15 # import stuff from node for others to import from revlog
15 # import stuff from node for others to import from revlog
16 from ..node import (
16 from ..node import (
17 nullrev,
17 nullrev,
18 )
18 )
19 from ..i18n import _
19 from ..i18n import _
20
20
21 from .constants import (
21 from .constants import (
22 REVIDX_ISCENSORED,
22 REVIDX_ISCENSORED,
23 REVIDX_RAWTEXT_CHANGING_FLAGS,
23 REVIDX_RAWTEXT_CHANGING_FLAGS,
24 )
24 )
25
25
26 from ..thirdparty import (
26 from ..thirdparty import (
27 attr,
27 attr,
28 )
28 )
29
29
30 from .. import (
30 from .. import (
31 error,
31 error,
32 mdiff,
32 mdiff,
33 util,
33 util,
34 )
34 )
35
35
36 # maximum <delta-chain-data>/<revision-text-length> ratio
36 # maximum <delta-chain-data>/<revision-text-length> ratio
37 LIMIT_DELTA2TEXT = 2
37 LIMIT_DELTA2TEXT = 2
38
38
39 class _testrevlog(object):
39 class _testrevlog(object):
40 """minimalist fake revlog to use in doctests"""
40 """minimalist fake revlog to use in doctests"""
41
41
42 def __init__(self, data, density=0.5, mingap=0, snapshot=()):
42 def __init__(self, data, density=0.5, mingap=0, snapshot=()):
43 """data is an list of revision payload boundaries"""
43 """data is an list of revision payload boundaries"""
44 self._data = data
44 self._data = data
45 self._srdensitythreshold = density
45 self._srdensitythreshold = density
46 self._srmingapsize = mingap
46 self._srmingapsize = mingap
47 self._snapshot = set(snapshot)
47 self._snapshot = set(snapshot)
48 self.index = None
48 self.index = None
49
49
50 def start(self, rev):
50 def start(self, rev):
51 if rev == nullrev:
51 if rev == nullrev:
52 return 0
52 return 0
53 if rev == 0:
53 if rev == 0:
54 return 0
54 return 0
55 return self._data[rev - 1]
55 return self._data[rev - 1]
56
56
57 def end(self, rev):
57 def end(self, rev):
58 if rev == nullrev:
58 if rev == nullrev:
59 return 0
59 return 0
60 return self._data[rev]
60 return self._data[rev]
61
61
62 def length(self, rev):
62 def length(self, rev):
63 return self.end(rev) - self.start(rev)
63 return self.end(rev) - self.start(rev)
64
64
65 def __len__(self):
65 def __len__(self):
66 return len(self._data)
66 return len(self._data)
67
67
68 def issnapshot(self, rev):
68 def issnapshot(self, rev):
69 if rev == nullrev:
69 if rev == nullrev:
70 return True
70 return True
71 return rev in self._snapshot
71 return rev in self._snapshot
72
72
73 def slicechunk(revlog, revs, targetsize=None):
73 def slicechunk(revlog, revs, targetsize=None):
74 """slice revs to reduce the amount of unrelated data to be read from disk.
74 """slice revs to reduce the amount of unrelated data to be read from disk.
75
75
76 ``revs`` is sliced into groups that should be read in one time.
76 ``revs`` is sliced into groups that should be read in one time.
77 Assume that revs are sorted.
77 Assume that revs are sorted.
78
78
79 The initial chunk is sliced until the overall density (payload/chunks-span
79 The initial chunk is sliced until the overall density (payload/chunks-span
80 ratio) is above `revlog._srdensitythreshold`. No gap smaller than
80 ratio) is above `revlog._srdensitythreshold`. No gap smaller than
81 `revlog._srmingapsize` is skipped.
81 `revlog._srmingapsize` is skipped.
82
82
83 If `targetsize` is set, no chunk larger than `targetsize` will be yield.
83 If `targetsize` is set, no chunk larger than `targetsize` will be yield.
84 For consistency with other slicing choice, this limit won't go lower than
84 For consistency with other slicing choice, this limit won't go lower than
85 `revlog._srmingapsize`.
85 `revlog._srmingapsize`.
86
86
87 If individual revisions chunk are larger than this limit, they will still
87 If individual revisions chunk are larger than this limit, they will still
88 be raised individually.
88 be raised individually.
89
89
90 >>> data = [
90 >>> data = [
91 ... 5, #00 (5)
91 ... 5, #00 (5)
92 ... 10, #01 (5)
92 ... 10, #01 (5)
93 ... 12, #02 (2)
93 ... 12, #02 (2)
94 ... 12, #03 (empty)
94 ... 12, #03 (empty)
95 ... 27, #04 (15)
95 ... 27, #04 (15)
96 ... 31, #05 (4)
96 ... 31, #05 (4)
97 ... 31, #06 (empty)
97 ... 31, #06 (empty)
98 ... 42, #07 (11)
98 ... 42, #07 (11)
99 ... 47, #08 (5)
99 ... 47, #08 (5)
100 ... 47, #09 (empty)
100 ... 47, #09 (empty)
101 ... 48, #10 (1)
101 ... 48, #10 (1)
102 ... 51, #11 (3)
102 ... 51, #11 (3)
103 ... 74, #12 (23)
103 ... 74, #12 (23)
104 ... 85, #13 (11)
104 ... 85, #13 (11)
105 ... 86, #14 (1)
105 ... 86, #14 (1)
106 ... 91, #15 (5)
106 ... 91, #15 (5)
107 ... ]
107 ... ]
108 >>> revlog = _testrevlog(data, snapshot=range(16))
108 >>> revlog = _testrevlog(data, snapshot=range(16))
109
109
110 >>> list(slicechunk(revlog, list(range(16))))
110 >>> list(slicechunk(revlog, list(range(16))))
111 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
111 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
112 >>> list(slicechunk(revlog, [0, 15]))
112 >>> list(slicechunk(revlog, [0, 15]))
113 [[0], [15]]
113 [[0], [15]]
114 >>> list(slicechunk(revlog, [0, 11, 15]))
114 >>> list(slicechunk(revlog, [0, 11, 15]))
115 [[0], [11], [15]]
115 [[0], [11], [15]]
116 >>> list(slicechunk(revlog, [0, 11, 13, 15]))
116 >>> list(slicechunk(revlog, [0, 11, 13, 15]))
117 [[0], [11, 13, 15]]
117 [[0], [11, 13, 15]]
118 >>> list(slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
118 >>> list(slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
119 [[1, 2], [5, 8, 10, 11], [14]]
119 [[1, 2], [5, 8, 10, 11], [14]]
120
120
121 Slicing with a maximum chunk size
121 Slicing with a maximum chunk size
122 >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=15))
122 >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=15))
123 [[0], [11], [13], [15]]
123 [[0], [11], [13], [15]]
124 >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
124 >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
125 [[0], [11], [13, 15]]
125 [[0], [11], [13, 15]]
126
126
127 Slicing involving nullrev
127 Slicing involving nullrev
128 >>> list(slicechunk(revlog, [-1, 0, 11, 13, 15], targetsize=20))
128 >>> list(slicechunk(revlog, [-1, 0, 11, 13, 15], targetsize=20))
129 [[-1, 0], [11], [13, 15]]
129 [[-1, 0], [11], [13, 15]]
130 >>> list(slicechunk(revlog, [-1, 13, 15], targetsize=5))
130 >>> list(slicechunk(revlog, [-1, 13, 15], targetsize=5))
131 [[-1], [13], [15]]
131 [[-1], [13], [15]]
132 """
132 """
133 if targetsize is not None:
133 if targetsize is not None:
134 targetsize = max(targetsize, revlog._srmingapsize)
134 targetsize = max(targetsize, revlog._srmingapsize)
135 # targetsize should not be specified when evaluating delta candidates:
135 # targetsize should not be specified when evaluating delta candidates:
136 # * targetsize is used to ensure we stay within specification when reading,
136 # * targetsize is used to ensure we stay within specification when reading,
137 densityslicing = getattr(revlog.index, 'slicechunktodensity', None)
137 densityslicing = getattr(revlog.index, 'slicechunktodensity', None)
138 if densityslicing is None:
138 if densityslicing is None:
139 densityslicing = lambda x, y, z: _slicechunktodensity(revlog, x, y, z)
139 densityslicing = lambda x, y, z: _slicechunktodensity(revlog, x, y, z)
140 for chunk in densityslicing(revs,
140 for chunk in densityslicing(revs,
141 revlog._srdensitythreshold,
141 revlog._srdensitythreshold,
142 revlog._srmingapsize):
142 revlog._srmingapsize):
143 for subchunk in _slicechunktosize(revlog, chunk, targetsize):
143 for subchunk in _slicechunktosize(revlog, chunk, targetsize):
144 yield subchunk
144 yield subchunk
145
145
146 def _slicechunktosize(revlog, revs, targetsize=None):
146 def _slicechunktosize(revlog, revs, targetsize=None):
147 """slice revs to match the target size
147 """slice revs to match the target size
148
148
149 This is intended to be used on chunk that density slicing selected by that
149 This is intended to be used on chunk that density slicing selected by that
150 are still too large compared to the read garantee of revlog. This might
150 are still too large compared to the read garantee of revlog. This might
151 happens when "minimal gap size" interrupted the slicing or when chain are
151 happens when "minimal gap size" interrupted the slicing or when chain are
152 built in a way that create large blocks next to each other.
152 built in a way that create large blocks next to each other.
153
153
154 >>> data = [
154 >>> data = [
155 ... 3, #0 (3)
155 ... 3, #0 (3)
156 ... 5, #1 (2)
156 ... 5, #1 (2)
157 ... 6, #2 (1)
157 ... 6, #2 (1)
158 ... 8, #3 (2)
158 ... 8, #3 (2)
159 ... 8, #4 (empty)
159 ... 8, #4 (empty)
160 ... 11, #5 (3)
160 ... 11, #5 (3)
161 ... 12, #6 (1)
161 ... 12, #6 (1)
162 ... 13, #7 (1)
162 ... 13, #7 (1)
163 ... 14, #8 (1)
163 ... 14, #8 (1)
164 ... ]
164 ... ]
165
165
166 == All snapshots cases ==
166 == All snapshots cases ==
167 >>> revlog = _testrevlog(data, snapshot=range(9))
167 >>> revlog = _testrevlog(data, snapshot=range(9))
168
168
169 Cases where chunk is already small enough
169 Cases where chunk is already small enough
170 >>> list(_slicechunktosize(revlog, [0], 3))
170 >>> list(_slicechunktosize(revlog, [0], 3))
171 [[0]]
171 [[0]]
172 >>> list(_slicechunktosize(revlog, [6, 7], 3))
172 >>> list(_slicechunktosize(revlog, [6, 7], 3))
173 [[6, 7]]
173 [[6, 7]]
174 >>> list(_slicechunktosize(revlog, [0], None))
174 >>> list(_slicechunktosize(revlog, [0], None))
175 [[0]]
175 [[0]]
176 >>> list(_slicechunktosize(revlog, [6, 7], None))
176 >>> list(_slicechunktosize(revlog, [6, 7], None))
177 [[6, 7]]
177 [[6, 7]]
178
178
179 cases where we need actual slicing
179 cases where we need actual slicing
180 >>> list(_slicechunktosize(revlog, [0, 1], 3))
180 >>> list(_slicechunktosize(revlog, [0, 1], 3))
181 [[0], [1]]
181 [[0], [1]]
182 >>> list(_slicechunktosize(revlog, [1, 3], 3))
182 >>> list(_slicechunktosize(revlog, [1, 3], 3))
183 [[1], [3]]
183 [[1], [3]]
184 >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
184 >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
185 [[1, 2], [3]]
185 [[1, 2], [3]]
186 >>> list(_slicechunktosize(revlog, [3, 5], 3))
186 >>> list(_slicechunktosize(revlog, [3, 5], 3))
187 [[3], [5]]
187 [[3], [5]]
188 >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
188 >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
189 [[3], [5]]
189 [[3], [5]]
190 >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
190 >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
191 [[5], [6, 7, 8]]
191 [[5], [6, 7, 8]]
192 >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
192 >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
193 [[0], [1, 2], [3], [5], [6, 7, 8]]
193 [[0], [1, 2], [3], [5], [6, 7, 8]]
194
194
195 Case with too large individual chunk (must return valid chunk)
195 Case with too large individual chunk (must return valid chunk)
196 >>> list(_slicechunktosize(revlog, [0, 1], 2))
196 >>> list(_slicechunktosize(revlog, [0, 1], 2))
197 [[0], [1]]
197 [[0], [1]]
198 >>> list(_slicechunktosize(revlog, [1, 3], 1))
198 >>> list(_slicechunktosize(revlog, [1, 3], 1))
199 [[1], [3]]
199 [[1], [3]]
200 >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
200 >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
201 [[3], [5]]
201 [[3], [5]]
202
202
203 == No Snapshot cases ==
203 == No Snapshot cases ==
204 >>> revlog = _testrevlog(data)
204 >>> revlog = _testrevlog(data)
205
205
206 Cases where chunk is already small enough
206 Cases where chunk is already small enough
207 >>> list(_slicechunktosize(revlog, [0], 3))
207 >>> list(_slicechunktosize(revlog, [0], 3))
208 [[0]]
208 [[0]]
209 >>> list(_slicechunktosize(revlog, [6, 7], 3))
209 >>> list(_slicechunktosize(revlog, [6, 7], 3))
210 [[6, 7]]
210 [[6, 7]]
211 >>> list(_slicechunktosize(revlog, [0], None))
211 >>> list(_slicechunktosize(revlog, [0], None))
212 [[0]]
212 [[0]]
213 >>> list(_slicechunktosize(revlog, [6, 7], None))
213 >>> list(_slicechunktosize(revlog, [6, 7], None))
214 [[6, 7]]
214 [[6, 7]]
215
215
216 cases where we need actual slicing
216 cases where we need actual slicing
217 >>> list(_slicechunktosize(revlog, [0, 1], 3))
217 >>> list(_slicechunktosize(revlog, [0, 1], 3))
218 [[0], [1]]
218 [[0], [1]]
219 >>> list(_slicechunktosize(revlog, [1, 3], 3))
219 >>> list(_slicechunktosize(revlog, [1, 3], 3))
220 [[1], [3]]
220 [[1], [3]]
221 >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
221 >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
222 [[1], [2, 3]]
222 [[1], [2, 3]]
223 >>> list(_slicechunktosize(revlog, [3, 5], 3))
223 >>> list(_slicechunktosize(revlog, [3, 5], 3))
224 [[3], [5]]
224 [[3], [5]]
225 >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
225 >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
226 [[3], [4, 5]]
226 [[3], [4, 5]]
227 >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
227 >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
228 [[5], [6, 7, 8]]
228 [[5], [6, 7, 8]]
229 >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
229 >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
230 [[0], [1, 2], [3], [5], [6, 7, 8]]
230 [[0], [1, 2], [3], [5], [6, 7, 8]]
231
231
232 Case with too large individual chunk (must return valid chunk)
232 Case with too large individual chunk (must return valid chunk)
233 >>> list(_slicechunktosize(revlog, [0, 1], 2))
233 >>> list(_slicechunktosize(revlog, [0, 1], 2))
234 [[0], [1]]
234 [[0], [1]]
235 >>> list(_slicechunktosize(revlog, [1, 3], 1))
235 >>> list(_slicechunktosize(revlog, [1, 3], 1))
236 [[1], [3]]
236 [[1], [3]]
237 >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
237 >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
238 [[3], [5]]
238 [[3], [5]]
239
239
240 == mixed case ==
240 == mixed case ==
241 >>> revlog = _testrevlog(data, snapshot=[0, 1, 2])
241 >>> revlog = _testrevlog(data, snapshot=[0, 1, 2])
242 >>> list(_slicechunktosize(revlog, list(range(9)), 5))
242 >>> list(_slicechunktosize(revlog, list(range(9)), 5))
243 [[0, 1], [2], [3, 4, 5], [6, 7, 8]]
243 [[0, 1], [2], [3, 4, 5], [6, 7, 8]]
244 """
244 """
245 assert targetsize is None or 0 <= targetsize
245 assert targetsize is None or 0 <= targetsize
246 startdata = revlog.start(revs[0])
246 startdata = revlog.start(revs[0])
247 enddata = revlog.end(revs[-1])
247 enddata = revlog.end(revs[-1])
248 fullspan = enddata - startdata
248 fullspan = enddata - startdata
249 if targetsize is None or fullspan <= targetsize:
249 if targetsize is None or fullspan <= targetsize:
250 yield revs
250 yield revs
251 return
251 return
252
252
253 startrevidx = 0
253 startrevidx = 0
254 endrevidx = 1
254 endrevidx = 1
255 iterrevs = enumerate(revs)
255 iterrevs = enumerate(revs)
256 next(iterrevs) # skip first rev.
256 next(iterrevs) # skip first rev.
257 # first step: get snapshots out of the way
257 # first step: get snapshots out of the way
258 for idx, r in iterrevs:
258 for idx, r in iterrevs:
259 span = revlog.end(r) - startdata
259 span = revlog.end(r) - startdata
260 snapshot = revlog.issnapshot(r)
260 snapshot = revlog.issnapshot(r)
261 if span <= targetsize and snapshot:
261 if span <= targetsize and snapshot:
262 endrevidx = idx + 1
262 endrevidx = idx + 1
263 else:
263 else:
264 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
264 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
265 if chunk:
265 if chunk:
266 yield chunk
266 yield chunk
267 startrevidx = idx
267 startrevidx = idx
268 startdata = revlog.start(r)
268 startdata = revlog.start(r)
269 endrevidx = idx + 1
269 endrevidx = idx + 1
270 if not snapshot:
270 if not snapshot:
271 break
271 break
272
272
273 # for the others, we use binary slicing to quickly converge toward valid
273 # for the others, we use binary slicing to quickly converge toward valid
274 # chunks (otherwise, we might end up looking for start/end of many
274 # chunks (otherwise, we might end up looking for start/end of many
275 # revisions). This logic is not looking for the perfect slicing point, it
275 # revisions). This logic is not looking for the perfect slicing point, it
276 # focuses on quickly converging toward valid chunks.
276 # focuses on quickly converging toward valid chunks.
277 nbitem = len(revs)
277 nbitem = len(revs)
278 while (enddata - startdata) > targetsize:
278 while (enddata - startdata) > targetsize:
279 endrevidx = nbitem
279 endrevidx = nbitem
280 if nbitem - startrevidx <= 1:
280 if nbitem - startrevidx <= 1:
281 break # protect against individual chunk larger than limit
281 break # protect against individual chunk larger than limit
282 localenddata = revlog.end(revs[endrevidx - 1])
282 localenddata = revlog.end(revs[endrevidx - 1])
283 span = localenddata - startdata
283 span = localenddata - startdata
284 while span > targetsize:
284 while span > targetsize:
285 if endrevidx - startrevidx <= 1:
285 if endrevidx - startrevidx <= 1:
286 break # protect against individual chunk larger than limit
286 break # protect against individual chunk larger than limit
287 endrevidx -= (endrevidx - startrevidx) // 2
287 endrevidx -= (endrevidx - startrevidx) // 2
288 localenddata = revlog.end(revs[endrevidx - 1])
288 localenddata = revlog.end(revs[endrevidx - 1])
289 span = localenddata - startdata
289 span = localenddata - startdata
290 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
290 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
291 if chunk:
291 if chunk:
292 yield chunk
292 yield chunk
293 startrevidx = endrevidx
293 startrevidx = endrevidx
294 startdata = revlog.start(revs[startrevidx])
294 startdata = revlog.start(revs[startrevidx])
295
295
296 chunk = _trimchunk(revlog, revs, startrevidx)
296 chunk = _trimchunk(revlog, revs, startrevidx)
297 if chunk:
297 if chunk:
298 yield chunk
298 yield chunk
299
299
def _slicechunktodensity(revlog, revs, targetdensity=0.5,
                         mingapsize=0):
    """slice revs to reduce the amount of unrelated data to be read from disk.

    ``revs`` is sliced into groups that should be read in one time.
    Assume that revs are sorted.

    The initial chunk is sliced until the overall density (payload/chunks-span
    ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
    skipped.

    >>> revlog = _testrevlog([
    ...  5, #00 (5)
    ...  10, #01 (5)
    ...  12, #02 (2)
    ...  12, #03 (empty)
    ...  27, #04 (15)
    ...  31, #05 (4)
    ...  31, #06 (empty)
    ...  42, #07 (11)
    ...  47, #08 (5)
    ...  47, #09 (empty)
    ...  48, #10 (1)
    ...  51, #11 (3)
    ...  74, #12 (23)
    ...  85, #13 (11)
    ...  86, #14 (1)
    ...  91, #15 (5)
    ... ])

    >>> list(_slicechunktodensity(revlog, list(range(16))))
    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
    >>> list(_slicechunktodensity(revlog, [0, 15]))
    [[0], [15]]
    >>> list(_slicechunktodensity(revlog, [0, 11, 15]))
    [[0], [11], [15]]
    >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15]))
    [[0], [11, 13, 15]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
    [[1, 2], [5, 8, 10, 11], [14]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
    ...                           mingapsize=20))
    [[1, 2, 3, 5, 8, 10, 11], [14]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
    ...                           targetdensity=0.95))
    [[1, 2], [5], [8, 10, 11], [14]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
    ...                           targetdensity=0.95, mingapsize=12))
    [[1, 2], [5, 8, 10, 11], [14]]
    """
    start = revlog.start
    length = revlog.length

    # a single rev (or nothing) can never be split
    if len(revs) <= 1:
        yield revs
        return

    deltachainspan = segmentspan(revlog, revs)

    # the whole span is smaller than the minimal gap we would skip anyway
    if deltachainspan < mingapsize:
        yield revs
        return

    readdata = deltachainspan
    chainpayload = sum(length(r) for r in revs)

    if deltachainspan:
        density = chainpayload / float(deltachainspan)
    else:
        density = 1.0

    # dense enough already, nothing to slice
    if density >= targetdensity:
        yield revs
        return

    # Gather the gaps between consecutive non-empty revisions as
    # (size, index) pairs; the list is later sorted so the largest gaps can
    # be popped from its end.
    gaps = []
    prevend = None
    for i, rev in enumerate(revs):
        revstart = start(rev)
        revlen = length(rev)

        # Skip empty revisions to form larger holes
        if revlen == 0:
            continue

        if prevend is not None:
            gapsize = revstart - prevend
            # only consider holes that are large enough
            if gapsize > mingapsize:
                gaps.append((gapsize, i))

        prevend = revstart + revlen
    # sort the gaps to pop them from largest to small
    gaps.sort()

    # Collect the indices of the largest holes until the density is acceptable
    selected = []
    while gaps and density < targetdensity:
        gapsize, gapidx = gaps.pop()

        selected.append(gapidx)

        # skipping this gap reduces the amount of data actually read,
        # which raises the density of what remains
        readdata -= gapsize
        if readdata > 0:
            density = chainpayload / float(readdata)
        else:
            density = 1.0
    selected.sort()

    # Cut the revs at collected indices
    previdx = 0
    for idx in selected:

        chunk = _trimchunk(revlog, revs, previdx, idx)
        if chunk:
            yield chunk

        previdx = idx

    # the final slice, from the last cut to the end
    chunk = _trimchunk(revlog, revs, previdx)
    if chunk:
        yield chunk
425
425
426 def _trimchunk(revlog, revs, startidx, endidx=None):
426 def _trimchunk(revlog, revs, startidx, endidx=None):
427 """returns revs[startidx:endidx] without empty trailing revs
427 """returns revs[startidx:endidx] without empty trailing revs
428
428
429 Doctest Setup
429 Doctest Setup
430 >>> revlog = _testrevlog([
430 >>> revlog = _testrevlog([
431 ... 5, #0
431 ... 5, #0
432 ... 10, #1
432 ... 10, #1
433 ... 12, #2
433 ... 12, #2
434 ... 12, #3 (empty)
434 ... 12, #3 (empty)
435 ... 17, #4
435 ... 17, #4
436 ... 21, #5
436 ... 21, #5
437 ... 21, #6 (empty)
437 ... 21, #6 (empty)
438 ... ])
438 ... ])
439
439
440 Contiguous cases:
440 Contiguous cases:
441 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
441 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
442 [0, 1, 2, 3, 4, 5]
442 [0, 1, 2, 3, 4, 5]
443 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
443 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
444 [0, 1, 2, 3, 4]
444 [0, 1, 2, 3, 4]
445 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
445 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
446 [0, 1, 2]
446 [0, 1, 2]
447 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
447 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
448 [2]
448 [2]
449 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
449 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
450 [3, 4, 5]
450 [3, 4, 5]
451 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
451 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
452 [3, 4]
452 [3, 4]
453
453
454 Discontiguous cases:
454 Discontiguous cases:
455 >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
455 >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
456 [1, 3, 5]
456 [1, 3, 5]
457 >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
457 >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
458 [1]
458 [1]
459 >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
459 >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
460 [3, 5]
460 [3, 5]
461 >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
461 >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
462 [3, 5]
462 [3, 5]
463 """
463 """
464 length = revlog.length
464 length = revlog.length
465
465
466 if endidx is None:
466 if endidx is None:
467 endidx = len(revs)
467 endidx = len(revs)
468
468
469 # If we have a non-emtpy delta candidate, there are nothing to trim
469 # If we have a non-emtpy delta candidate, there are nothing to trim
470 if revs[endidx - 1] < len(revlog):
470 if revs[endidx - 1] < len(revlog):
471 # Trim empty revs at the end, except the very first revision of a chain
471 # Trim empty revs at the end, except the very first revision of a chain
472 while (endidx > 1
472 while (endidx > 1
473 and endidx > startidx
473 and endidx > startidx
474 and length(revs[endidx - 1]) == 0):
474 and length(revs[endidx - 1]) == 0):
475 endidx -= 1
475 endidx -= 1
476
476
477 return revs[startidx:endidx]
477 return revs[startidx:endidx]
478
478
def segmentspan(revlog, revs):
    """Get the byte span of a segment of revisions

    revs is a sorted array of revision numbers

    >>> revlog = _testrevlog([
    ...  5, #0
    ...  10, #1
    ...  12, #2
    ...  12, #3 (empty)
    ...  17, #4
    ... ])

    >>> segmentspan(revlog, [0, 1, 2, 3, 4])
    17
    >>> segmentspan(revlog, [0, 4])
    17
    >>> segmentspan(revlog, [3, 4])
    5
    >>> segmentspan(revlog, [1, 2, 3,])
    7
    >>> segmentspan(revlog, [1, 3])
    7
    """
    # an empty segment spans no bytes at all
    if not revs:
        return 0
    first, last = revs[0], revs[-1]
    # distance from the start of the first rev to the end of the last one
    return revlog.end(last) - revlog.start(first)
507
507
def _textfromdelta(fh, revlog, baserev, delta, p1, p2, flags, expectednode):
    """build full text from a (base, delta) pair and other metadata

    Applies ``delta`` on top of revision ``baserev`` (read through the file
    handle ``fh``), runs the revlog flag processors on the result and, when
    they request it, validates the reconstructed text against
    ``expectednode`` using parents ``p1``/``p2``.

    Raises error.StorageError if a text carrying no censor flag turns out to
    be censored; re-raises CensoredNodeError when the censor flag is missing.
    """
    # special case deltas which replace entire base; no need to decode
    # base revision. this neatly avoids censored bases, which throw when
    # they're decoded.
    hlen = struct.calcsize(">lll")
    if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev),
                                               len(delta) - hlen):
        fulltext = delta[hlen:]
    else:
        # deltabase is rawtext before changed by flag processors, which is
        # equivalent to non-raw text
        basetext = revlog.revision(baserev, _df=fh, raw=False)
        fulltext = mdiff.patch(basetext, delta)

    try:
        res = revlog._processflags(fulltext, flags, 'read', raw=True)
        fulltext, validatehash = res
        if validatehash:
            revlog.checkhash(fulltext, expectednode, p1=p1, p2=p2)
        if flags & REVIDX_ISCENSORED:
            # a censored revision must fail the hash check above; reaching
            # here with the flag set means the flag is bogus
            raise error.StorageError(_('node %s is not censored') %
                                     expectednode)
    except error.CensoredNodeError:
        # must pass the censored index flag to add censored revisions
        if not flags & REVIDX_ISCENSORED:
            raise
    return fulltext
536
536
@attr.s(slots=True, frozen=True)
class _deltainfo(object):
    """immutable description of a candidate delta (see isgooddeltainfo)"""
    # distance from the base revision; bounding it limits the I/O needed
    distance = attr.ib()
    # size of the delta itself
    deltalen = attr.ib()
    # the delta payload -- presumably the (compressed) data chunks; confirm
    # against the builder before relying on the exact shape
    data = attr.ib()
    # revision used as the delta base
    base = attr.ib()
    # base of the whole delta chain -- assumption from the name, verify
    chainbase = attr.ib()
    # number of deltas in the chain (checked against revlog._maxchainlen)
    chainlen = attr.ib()
    # cumulated size of all deltas to apply; bounding it limits CPU use
    compresseddeltalen = attr.ib()
    # snapshot depth when this would be an intermediate snapshot, else None
    snapshotdepth = attr.ib()
547
547
def isgooddeltainfo(revlog, deltainfo, revinfo):
    """Returns True if the given delta is good. Good means that it is within
    the disk span, disk size, and chain length bounds that we know to be
    performant."""
    # no delta at all is never a good delta
    if deltainfo is None:
        return False

    # - 'deltainfo.distance' is the distance from the base revision --
    #   bounding it limits the amount of I/O we need to do.
    # - 'deltainfo.compresseddeltalen' is the sum of the total size of
    #   deltas we need to apply -- bounding it limits the amount of CPU
    #   we consume.

    textlen = revinfo.textlen
    # allowed read span: the configured cap, never below 4x the fulltext;
    # an unset cap means the distance check always passes
    maxdist = revlog._maxdeltachainspan
    if not maxdist:
        maxdist = deltainfo.distance  # ensure the conditional pass
    maxdist = max(maxdist, textlen * 4)

    # Bad delta from read span:
    #
    # If the span of data read is larger than the maximum allowed.
    #
    # In the sparse-revlog case, we rely on the associated "sparse reading"
    # to avoid issue related to the span of data. In theory, it would be
    # possible to build pathological revlog where delta pattern would lead
    # to too many reads. However, they do not happen in practice at all. So
    # we skip the span check entirely.
    if not revlog._sparserevlog and maxdist < deltainfo.distance:
        return False

    # Bad delta from new delta size:
    #
    # A delta larger than the target text is inefficient to store.
    if textlen < deltainfo.deltalen:
        return False

    # Bad delta from cumulated payload size:
    #
    # The sum of the deltas may not exceed K * target text length.
    if textlen * LIMIT_DELTA2TEXT < deltainfo.compresseddeltalen:
        return False

    # Bad delta from chain length:
    #
    # The number of deltas in the chain may not exceed the configured cap.
    if (revlog._maxchainlen
            and revlog._maxchainlen < deltainfo.chainlen):
        return False

    snapdepth = deltainfo.snapshotdepth

    # bad delta from intermediate snapshot size limit
    #
    # The limit exists to prevent an endless chain of intermediate deltas
    # from being created.
    if snapdepth is not None and (textlen >> snapdepth) < deltainfo.deltalen:
        return False

    # bad delta if the new intermediate snapshot is larger than the
    # previous snapshot
    if snapdepth and revlog.length(deltainfo.base) < deltainfo.deltalen:
        return False

    return True
616
616
# If a revision's full text is that much bigger than a base candidate full
# text's, it is very unlikely that it will produce a valid delta. We no longer
# consider these candidates.
LIMIT_BASE2TEXT = 500
621
621
def _candidategroups(revlog, textlen, p1, p2, cachedelta):
    """Provides group of revision to be tested as delta base

    This top level function focus on emitting groups with unique and worthwhile
    content. See _raw_candidate_groups for details about the group order.

    This is a coroutine-style generator: after each emitted group the caller
    sends back the base it found good (or None), which is forwarded to the
    underlying _refinedgroups generator. A final None marks exhaustion.
    """
    # should we try to build a delta?
    if not (len(revlog) and revlog._storedeltachains):
        yield None
        return

    deltalength = revlog.length
    deltaparent = revlog.deltaparent
    sparse = revlog._sparserevlog
    good = None

    # upper bound on the cumulated size of deltas we are willing to apply
    deltas_limit = textlen * LIMIT_DELTA2TEXT

    tested = set([nullrev])
    candidates = _refinedgroups(revlog, p1, p2, cachedelta)
    while True:
        temptative = candidates.send(good)
        if temptative is None:
            break
        group = []
        for rev in temptative:
            # skip over empty delta (no need to include them in a chain)
            while (revlog._generaldelta
                   and not (rev == nullrev
                            or rev in tested
                            or deltalength(rev))):
                tested.add(rev)
                rev = deltaparent(rev)
            # no need to try a delta against nullrev, this will be done as a
            # last resort.
            if rev == nullrev:
                continue
            # filter out revision we tested already
            if rev in tested:
                continue
            tested.add(rev)
            # filter out delta base that will never produce good delta
            if deltas_limit < revlog.length(rev):
                continue
            # a base much smaller than the fulltext cannot yield a good delta
            if sparse and revlog.rawsize(rev) < (textlen // LIMIT_BASE2TEXT):
                continue
            # no delta for rawtext-changing revs (see "candelta" for why)
            if revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
                continue
            # If we reach here, we are about to build and test a delta.
            # The delta building process will compute the chaininfo in all
            # case, since that computation is cached, it is fine to access it
            # here too.
            chainlen, chainsize = revlog._chaininfo(rev)
            # if chain will be too long, skip base
            if revlog._maxchainlen and chainlen >= revlog._maxchainlen:
                continue
            # if chain already have too much data, skip base
            if deltas_limit < chainsize:
                continue
            group.append(rev)
        if group:
            # XXX: in the sparse revlog case, group can become large,
            # impacting performances. Some bounding or slicing mecanism
            # would help to reduce this impact.
            good = yield tuple(group)
    yield None
689
689
def _findsnapshots(revlog, cache, start_rev):
    """find snapshot from start_rev to tip

    Snapshot revisions are appended to ``cache`` keyed by their delta
    parent.
    """
    index = revlog.index
    if util.safehasattr(index, 'findsnapshots'):
        # fast path: let the native index gather the snapshots itself
        index.findsnapshots(cache, start_rev)
    else:
        for rev in revlog.revs(start_rev):
            if revlog.issnapshot(rev):
                cache[revlog.deltaparent(rev)].append(rev)
700
700
def _refinedgroups(revlog, p1, p2, cachedelta):
    """Generate groups of candidate delta bases, refined by caller feedback

    Coroutine-style generator: after each emitted group, the caller sends
    back the revision it judged good (or None). A good snapshot candidate
    triggers extra refinement rounds; a final None marks exhaustion.
    """
    good = None
    # First we try to reuse the delta contained in the bundle.
    # (or from the source revlog)
    #
    # This logic only applies to general delta repositories and can be disabled
    # through configuration. Disabling reuse source delta is useful when
    # we want to make sure we recomputed "optimal" deltas.
    if cachedelta and revlog._generaldelta and revlog._lazydeltabase:
        # Assume what we received from the server is a good choice
        # build delta will reuse the cache
        good = yield (cachedelta[0],)
        if good is not None:
            yield None
            return
    snapshots = collections.defaultdict(list)
    for candidates in _rawgroups(revlog, p1, p2, cachedelta, snapshots):
        good = yield candidates
        if good is not None:
            break

    # If sparse revlog is enabled, we can try to refine the available deltas
    if not revlog._sparserevlog:
        yield None
        return

    # if we have a refinable value, try to refine it
    if good is not None and good not in (p1, p2) and revlog.issnapshot(good):
        # refine snapshot down: walk the delta-parent chain toward the root,
        # offering each ancestor snapshot in turn
        previous = None
        while previous != good:
            previous = good
            base = revlog.deltaparent(good)
            if base == nullrev:
                break
            good = yield (base,)
        # refine snapshot up: offer the snapshots based on the current one
        if not snapshots:
            _findsnapshots(revlog, snapshots, good + 1)
        previous = None
        while good != previous:
            previous = good
            children = tuple(sorted(c for c in snapshots[good]))
            good = yield children

    # we have found nothing
    yield None
748
748
749 def _rawgroups(revlog, p1, p2, cachedelta, snapshots=None):
749 def _rawgroups(revlog, p1, p2, cachedelta, snapshots=None):
750 """Provides group of revision to be tested as delta base
750 """Provides group of revision to be tested as delta base
751
751
752 This lower level function focus on emitting delta theorically interresting
752 This lower level function focus on emitting delta theorically interresting
753 without looking it any practical details.
753 without looking it any practical details.
754
754
755 The group order aims at providing fast or small candidates first.
755 The group order aims at providing fast or small candidates first.
756 """
756 """
757 gdelta = revlog._generaldelta
757 gdelta = revlog._generaldelta
758 # gate sparse behind general-delta because of issue6056
758 # gate sparse behind general-delta because of issue6056
759 sparse = gdelta and revlog._sparserevlog
759 sparse = gdelta and revlog._sparserevlog
760 curr = len(revlog)
760 curr = len(revlog)
761 prev = curr - 1
761 prev = curr - 1
762 deltachain = lambda rev: revlog._deltachain(rev)[0]
762 deltachain = lambda rev: revlog._deltachain(rev)[0]
763
763
764 if gdelta:
764 if gdelta:
765 # exclude already lazy tested base if any
765 # exclude already lazy tested base if any
766 parents = [p for p in (p1, p2) if p != nullrev]
766 parents = [p for p in (p1, p2) if p != nullrev]
767
767
768 if not revlog._deltabothparents and len(parents) == 2:
768 if not revlog._deltabothparents and len(parents) == 2:
769 parents.sort()
769 parents.sort()
770 # To minimize the chance of having to build a fulltext,
770 # To minimize the chance of having to build a fulltext,
771 # pick first whichever parent is closest to us (max rev)
771 # pick first whichever parent is closest to us (max rev)
772 yield (parents[1],)
772 yield (parents[1],)
773 # then the other one (min rev) if the first did not fit
773 # then the other one (min rev) if the first did not fit
774 yield (parents[0],)
774 yield (parents[0],)
775 elif len(parents) > 0:
775 elif len(parents) > 0:
776 # Test all parents (1 or 2), and keep the best candidate
776 # Test all parents (1 or 2), and keep the best candidate
777 yield parents
777 yield parents
778
778
779 if sparse and parents:
779 if sparse and parents:
780 if snapshots is None:
780 if snapshots is None:
781 # map: base-rev: snapshot-rev
781 # map: base-rev: snapshot-rev
782 snapshots = collections.defaultdict(list)
782 snapshots = collections.defaultdict(list)
783 # See if we can use an existing snapshot in the parent chains to use as
783 # See if we can use an existing snapshot in the parent chains to use as
784 # a base for a new intermediate-snapshot
784 # a base for a new intermediate-snapshot
785 #
785 #
786 # search for snapshot in parents delta chain
786 # search for snapshot in parents delta chain
787 # map: snapshot-level: snapshot-rev
787 # map: snapshot-level: snapshot-rev
788 parents_snaps = collections.defaultdict(set)
788 parents_snaps = collections.defaultdict(set)
789 candidate_chains = [deltachain(p) for p in parents]
789 candidate_chains = [deltachain(p) for p in parents]
790 for chain in candidate_chains:
790 for chain in candidate_chains:
791 for idx, s in enumerate(chain):
791 for idx, s in enumerate(chain):
792 if not revlog.issnapshot(s):
792 if not revlog.issnapshot(s):
793 break
793 break
794 parents_snaps[idx].add(s)
794 parents_snaps[idx].add(s)
795 snapfloor = min(parents_snaps[0]) + 1
795 snapfloor = min(parents_snaps[0]) + 1
796 _findsnapshots(revlog, snapshots, snapfloor)
796 _findsnapshots(revlog, snapshots, snapfloor)
797 # search for the highest "unrelated" revision
797 # search for the highest "unrelated" revision
798 #
798 #
799 # Adding snapshots used by "unrelated" revision increase the odd we
799 # Adding snapshots used by "unrelated" revision increase the odd we
800 # reuse an independant, yet better snapshot chain.
800 # reuse an independant, yet better snapshot chain.
801 #
801 #
802 # XXX instead of building a set of revisions, we could lazily enumerate
802 # XXX instead of building a set of revisions, we could lazily enumerate
803 # over the chains. That would be more efficient, however we stick to
803 # over the chains. That would be more efficient, however we stick to
804 # simple code for now.
804 # simple code for now.
805 all_revs = set()
805 all_revs = set()
806 for chain in candidate_chains:
806 for chain in candidate_chains:
807 all_revs.update(chain)
807 all_revs.update(chain)
808 other = None
808 other = None
809 for r in revlog.revs(prev, snapfloor):
809 for r in revlog.revs(prev, snapfloor):
810 if r not in all_revs:
810 if r not in all_revs:
811 other = r
811 other = r
812 break
812 break
813 if other is not None:
813 if other is not None:
814 # To avoid unfair competition, we won't use unrelated intermediate
814 # To avoid unfair competition, we won't use unrelated intermediate
815 # snapshot that are deeper than the ones from the parent delta
815 # snapshot that are deeper than the ones from the parent delta
816 # chain.
816 # chain.
817 max_depth = max(parents_snaps.keys())
817 max_depth = max(parents_snaps.keys())
818 chain = deltachain(other)
818 chain = deltachain(other)
819 for idx, s in enumerate(chain):
819 for idx, s in enumerate(chain):
820 if s < snapfloor:
820 if s < snapfloor:
821 continue
821 continue
822 if max_depth < idx:
822 if max_depth < idx:
823 break
823 break
824 if not revlog.issnapshot(s):
824 if not revlog.issnapshot(s):
825 break
825 break
826 parents_snaps[idx].add(s)
826 parents_snaps[idx].add(s)
827 # Test them as possible intermediate snapshot base
827 # Test them as possible intermediate snapshot base
828 # We test them from highest to lowest level. High level one are more
828 # We test them from highest to lowest level. High level one are more
829 # likely to result in small delta
829 # likely to result in small delta
830 floor = None
830 floor = None
831 for idx, snaps in sorted(parents_snaps.items(), reverse=True):
831 for idx, snaps in sorted(parents_snaps.items(), reverse=True):
832 siblings = set()
832 siblings = set()
833 for s in snaps:
833 for s in snaps:
834 siblings.update(snapshots[s])
834 siblings.update(snapshots[s])
835 # Before considering making a new intermediate snapshot, we check
835 # Before considering making a new intermediate snapshot, we check
836 # if an existing snapshot, children of base we consider, would be
836 # if an existing snapshot, children of base we consider, would be
837 # suitable.
837 # suitable.
838 #
838 #
839 # It give a change to reuse a delta chain "unrelated" to the
839 # It give a change to reuse a delta chain "unrelated" to the
840 # current revision instead of starting our own. Without such
840 # current revision instead of starting our own. Without such
841 # re-use, topological branches would keep reopening new chains.
841 # re-use, topological branches would keep reopening new chains.
842 # Creating more and more snapshot as the repository grow.
842 # Creating more and more snapshot as the repository grow.
843
843
844 if floor is not None:
844 if floor is not None:
845 # We only do this for siblings created after the one in our
845 # We only do this for siblings created after the one in our
846 # parent's delta chain. Those created before has less chances
846 # parent's delta chain. Those created before has less chances
847 # to be valid base since our ancestors had to create a new
847 # to be valid base since our ancestors had to create a new
848 # snapshot.
848 # snapshot.
849 siblings = [r for r in siblings if floor < r]
849 siblings = [r for r in siblings if floor < r]
850 yield tuple(sorted(siblings))
850 yield tuple(sorted(siblings))
851 # then test the base from our parent's delta chain.
851 # then test the base from our parent's delta chain.
852 yield tuple(sorted(snaps))
852 yield tuple(sorted(snaps))
853 floor = min(snaps)
853 floor = min(snaps)
854 # No suitable base found in the parent chain, search if any full
854 # No suitable base found in the parent chain, search if any full
855 # snapshots emitted since parent's base would be a suitable base for an
855 # snapshots emitted since parent's base would be a suitable base for an
856 # intermediate snapshot.
856 # intermediate snapshot.
857 #
857 #
858 # It give a chance to reuse a delta chain unrelated to the current
858 # It give a chance to reuse a delta chain unrelated to the current
859 # revisions instead of starting our own. Without such re-use,
859 # revisions instead of starting our own. Without such re-use,
860 # topological branches would keep reopening new full chains. Creating
860 # topological branches would keep reopening new full chains. Creating
861 # more and more snapshot as the repository grow.
861 # more and more snapshot as the repository grow.
862 yield tuple(snapshots[nullrev])
862 yield tuple(snapshots[nullrev])
863
863
864 if not sparse:
864 if not sparse:
865 # other approach failed try against prev to hopefully save us a
865 # other approach failed try against prev to hopefully save us a
866 # fulltext.
866 # fulltext.
867 yield (prev,)
867 yield (prev,)
868
868
class deltacomputer(object):
    """Compute and select deltas for revisions stored in a revlog.

    The computer is bound to a single revlog and encapsulates the logic
    for reusing a cached (externally provided) delta, building a delta
    against a candidate base revision, or falling back to storing a full
    snapshot when no acceptable delta exists.
    """

    def __init__(self, revlog):
        self.revlog = revlog

    def buildtext(self, revinfo, fh):
        """Builds a fulltext version of a revision

        revinfo: _revisioninfo instance that contains all needed info
        fh:      file handle to either the .i or the .d revlog file,
                 depending on whether it is inlined or not
        """
        btext = revinfo.btext
        if btext[0] is not None:
            # fulltext was already computed and cached on the revision info
            return btext[0]

        revlog = self.revlog
        cachedelta = revinfo.cachedelta
        baserev = cachedelta[0]
        delta = cachedelta[1]

        fulltext = btext[0] = _textfromdelta(fh, revlog, baserev, delta,
                                             revinfo.p1, revinfo.p2,
                                             revinfo.flags, revinfo.node)
        return fulltext

    def _builddeltadiff(self, base, revinfo, fh):
        """Return a raw (uncompressed) delta of the revision against `base`."""
        revlog = self.revlog
        t = self.buildtext(revinfo, fh)
        if revlog.iscensored(base):
            # deltas based on a censored revision must replace the
            # full content in one patch, so delta works everywhere
            header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
            delta = header + t
        else:
            ptext = revlog.revision(base, _df=fh, raw=True)
            delta = mdiff.textdiff(ptext, t)

        return delta

    def _builddeltainfo(self, revinfo, base, fh):
        """Build a _deltainfo describing a delta of the revision against `base`.

        Reuses the cached delta when it is usable (lazy-delta enabled and the
        cached base resolves to `base`), otherwise computes a fresh diff.
        """
        # can we use the cached delta?
        delta = None
        if revinfo.cachedelta:
            cachebase, cachediff = revinfo.cachedelta
            # check if the diff still applies
            currentbase = cachebase
            while (currentbase != nullrev
                   and currentbase != base
                   and self.revlog.length(currentbase) == 0):
                # walk past empty revisions: a zero-length entry stores no
                # delta of its own, so its delta parent is equivalent
                currentbase = self.revlog.deltaparent(currentbase)
            if self.revlog._lazydelta and currentbase == base:
                delta = revinfo.cachedelta[1]
        if delta is None:
            delta = self._builddeltadiff(base, revinfo, fh)
        revlog = self.revlog
        header, data = revlog.compress(delta)
        deltalen = len(header) + len(data)
        chainbase = revlog.chainbase(base)
        offset = revlog.end(len(revlog) - 1)
        # distance from the start of the delta chain to the end of this delta
        dist = deltalen + offset - revlog.start(chainbase)
        if revlog._generaldelta:
            deltabase = base
        else:
            deltabase = chainbase
        chainlen, compresseddeltalen = revlog._chaininfo(base)
        chainlen += 1
        compresseddeltalen += deltalen

        snapshotdepth = None
        if deltabase == nullrev:
            snapshotdepth = 0
        elif revlog._sparserevlog and revlog.issnapshot(deltabase):
            # A delta chain should always be one full snapshot,
            # zero or more semi-snapshots, and zero or more deltas
            p1, p2 = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2)
            if deltabase not in (p1, p2):
                # `deltabase` is already known to be a snapshot from the
                # enclosing `elif` condition; no need to re-test it here.
                snapshotdepth = len(revlog._deltachain(deltabase)[0])

        return _deltainfo(dist, deltalen, (header, data), deltabase,
                          chainbase, chainlen, compresseddeltalen,
                          snapshotdepth)

    def _fullsnapshotinfo(self, fh, revinfo):
        """Return a _deltainfo for storing the revision as a full snapshot."""
        curr = len(self.revlog)
        rawtext = self.buildtext(revinfo, fh)
        data = self.revlog.compress(rawtext)
        compresseddeltalen = deltalen = dist = len(data[1]) + len(data[0])
        # a full snapshot is its own chain base, one entry long
        deltabase = chainbase = curr
        snapshotdepth = 0
        chainlen = 1

        return _deltainfo(dist, deltalen, data, deltabase,
                          chainbase, chainlen, compresseddeltalen,
                          snapshotdepth)

    def finddeltainfo(self, revinfo, fh):
        """Find an acceptable delta against a candidate revision

        revinfo: information about the revision (instance of _revisioninfo)
        fh:      file handle to either the .i or the .d revlog file,
                 depending on whether it is inlined or not

        Returns the first acceptable candidate revision, as ordered by
        _candidategroups

        If no suitable deltabase is found, we return delta info for a full
        snapshot.
        """
        if not revinfo.textlen:
            # empty revision: a full (empty) snapshot is the cheapest storage
            return self._fullsnapshotinfo(fh, revinfo)

        # no delta for flag processor revision (see "candelta" for why)
        # not calling candelta since only one revision needs test, also to
        # avoid overhead fetching flags again.
        if revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
            return self._fullsnapshotinfo(fh, revinfo)

        cachedelta = revinfo.cachedelta
        p1 = revinfo.p1
        p2 = revinfo.p2
        revlog = self.revlog

        deltainfo = None
        p1r, p2r = revlog.rev(p1), revlog.rev(p2)
        groups = _candidategroups(self.revlog, revinfo.textlen,
                                  p1r, p2r, cachedelta)
        candidaterevs = next(groups)
        while candidaterevs is not None:
            nominateddeltas = []
            if deltainfo is not None:
                # if we already found a good delta,
                # challenge it against refined candidates
                nominateddeltas.append(deltainfo)
            for candidaterev in candidaterevs:
                candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
                if isgooddeltainfo(self.revlog, candidatedelta, revinfo):
                    nominateddeltas.append(candidatedelta)
            if nominateddeltas:
                # keep the smallest acceptable delta seen so far
                deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
            if deltainfo is not None:
                # feed the chosen base back so the generator can refine
                # its next group of candidates around it
                candidaterevs = groups.send(deltainfo.base)
            else:
                candidaterevs = next(groups)

        if deltainfo is None:
            deltainfo = self._fullsnapshotinfo(fh, revinfo)
        return deltainfo
@@ -1,147 +1,147 b''
1 ====================================
1 ====================================
2 Test delta choice with sparse revlog
2 Test delta choice with sparse revlog
3 ====================================
3 ====================================
4
4
5 Sparse-revlog usually shows the most gain on Manifest. However, it is simpler
5 Sparse-revlog usually shows the most gain on Manifest. However, it is simpler
6 to generate an appropriate file, so we test with a single file instead. The
6 to generate an appropriate file, so we test with a single file instead. The
7 goal is to observe intermediate snapshot being created.
7 goal is to observe intermediate snapshot being created.
8
8
9 We need a large enough file. Part of the content needs to be replaced
9 We need a large enough file. Part of the content needs to be replaced
10 repeatedly while some of it changes rarely.
10 repeatedly while some of it changes rarely.
11
11
12 $ bundlepath="$TESTDIR/artifacts/cache/big-file-churn.hg"
12 $ bundlepath="$TESTDIR/artifacts/cache/big-file-churn.hg"
13
13
14 $ expectedhash=`cat "$bundlepath".md5`
14 $ expectedhash=`cat "$bundlepath".md5`
15
15
16 #if slow
16 #if slow
17
17
18 $ if [ ! -f "$bundlepath" ]; then
18 $ if [ ! -f "$bundlepath" ]; then
19 > "$TESTDIR"/artifacts/scripts/generate-churning-bundle.py > /dev/null
19 > "$TESTDIR"/artifacts/scripts/generate-churning-bundle.py > /dev/null
20 > fi
20 > fi
21
21
22 #else
22 #else
23
23
24 $ if [ ! -f "$bundlepath" ]; then
24 $ if [ ! -f "$bundlepath" ]; then
25 > echo 'skipped: missing artifact, run "'"$TESTDIR"'/artifacts/scripts/generate-churning-bundle.py"'
25 > echo 'skipped: missing artifact, run "'"$TESTDIR"'/artifacts/scripts/generate-churning-bundle.py"'
26 > exit 80
26 > exit 80
27 > fi
27 > fi
28
28
29 #endif
29 #endif
30
30
31 $ currenthash=`f -M "$bundlepath" | cut -d = -f 2`
31 $ currenthash=`f -M "$bundlepath" | cut -d = -f 2`
32 $ if [ "$currenthash" != "$expectedhash" ]; then
32 $ if [ "$currenthash" != "$expectedhash" ]; then
33 > echo 'skipped: outdated artifact, md5 "'"$currenthash"'" expected "'"$expectedhash"'" run "'"$TESTDIR"'/artifacts/scripts/generate-churning-bundle.py"'
33 > echo 'skipped: outdated artifact, md5 "'"$currenthash"'" expected "'"$expectedhash"'" run "'"$TESTDIR"'/artifacts/scripts/generate-churning-bundle.py"'
34 > exit 80
34 > exit 80
35 > fi
35 > fi
36
36
37 $ cat >> $HGRCPATH << EOF
37 $ cat >> $HGRCPATH << EOF
38 > [format]
38 > [format]
39 > sparse-revlog = yes
39 > sparse-revlog = yes
40 > maxchainlen = 15
40 > maxchainlen = 15
41 > [storage]
41 > [storage]
42 > revlog.optimize-delta-parent-choice = yes
42 > revlog.optimize-delta-parent-choice = yes
43 > revlog.reuse-external-delta-parent = no
43 > revlog.reuse-external-delta = no
44 > EOF
44 > EOF
45 $ hg init sparse-repo
45 $ hg init sparse-repo
46 $ cd sparse-repo
46 $ cd sparse-repo
47 $ hg unbundle $bundlepath
47 $ hg unbundle $bundlepath
48 adding changesets
48 adding changesets
49 adding manifests
49 adding manifests
50 adding file changes
50 adding file changes
51 added 5001 changesets with 5001 changes to 1 files (+89 heads)
51 added 5001 changesets with 5001 changes to 1 files (+89 heads)
52 new changesets 9706f5af64f4:d9032adc8114 (5001 drafts)
52 new changesets 9706f5af64f4:d9032adc8114 (5001 drafts)
53 (run 'hg heads' to see heads, 'hg merge' to merge)
53 (run 'hg heads' to see heads, 'hg merge' to merge)
54 $ hg up
54 $ hg up
55 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
55 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
56 updated to "d9032adc8114: commit #5000"
56 updated to "d9032adc8114: commit #5000"
57 89 other heads for branch "default"
57 89 other heads for branch "default"
58
58
59 $ hg log --stat -r 0:3
59 $ hg log --stat -r 0:3
60 changeset: 0:9706f5af64f4
60 changeset: 0:9706f5af64f4
61 user: test
61 user: test
62 date: Thu Jan 01 00:00:00 1970 +0000
62 date: Thu Jan 01 00:00:00 1970 +0000
63 summary: initial commit
63 summary: initial commit
64
64
65 SPARSE-REVLOG-TEST-FILE | 10500 ++++++++++++++++++++++++++++++++++++++++++++++
65 SPARSE-REVLOG-TEST-FILE | 10500 ++++++++++++++++++++++++++++++++++++++++++++++
66 1 files changed, 10500 insertions(+), 0 deletions(-)
66 1 files changed, 10500 insertions(+), 0 deletions(-)
67
67
68 changeset: 1:724907deaa5e
68 changeset: 1:724907deaa5e
69 user: test
69 user: test
70 date: Thu Jan 01 00:00:00 1970 +0000
70 date: Thu Jan 01 00:00:00 1970 +0000
71 summary: commit #1
71 summary: commit #1
72
72
73 SPARSE-REVLOG-TEST-FILE | 1068 +++++++++++++++++++++++-----------------------
73 SPARSE-REVLOG-TEST-FILE | 1068 +++++++++++++++++++++++-----------------------
74 1 files changed, 534 insertions(+), 534 deletions(-)
74 1 files changed, 534 insertions(+), 534 deletions(-)
75
75
76 changeset: 2:62c41bce3e5d
76 changeset: 2:62c41bce3e5d
77 user: test
77 user: test
78 date: Thu Jan 01 00:00:00 1970 +0000
78 date: Thu Jan 01 00:00:00 1970 +0000
79 summary: commit #2
79 summary: commit #2
80
80
81 SPARSE-REVLOG-TEST-FILE | 1068 +++++++++++++++++++++++-----------------------
81 SPARSE-REVLOG-TEST-FILE | 1068 +++++++++++++++++++++++-----------------------
82 1 files changed, 534 insertions(+), 534 deletions(-)
82 1 files changed, 534 insertions(+), 534 deletions(-)
83
83
84 changeset: 3:348a9cbd6959
84 changeset: 3:348a9cbd6959
85 user: test
85 user: test
86 date: Thu Jan 01 00:00:00 1970 +0000
86 date: Thu Jan 01 00:00:00 1970 +0000
87 summary: commit #3
87 summary: commit #3
88
88
89 SPARSE-REVLOG-TEST-FILE | 1068 +++++++++++++++++++++++-----------------------
89 SPARSE-REVLOG-TEST-FILE | 1068 +++++++++++++++++++++++-----------------------
90 1 files changed, 534 insertions(+), 534 deletions(-)
90 1 files changed, 534 insertions(+), 534 deletions(-)
91
91
92
92
93 $ f -s .hg/store/data/*.d
93 $ f -s .hg/store/data/*.d
94 .hg/store/data/_s_p_a_r_s_e-_r_e_v_l_o_g-_t_e_s_t-_f_i_l_e.d: size=63327412
94 .hg/store/data/_s_p_a_r_s_e-_r_e_v_l_o_g-_t_e_s_t-_f_i_l_e.d: size=63327412
95 $ hg debugrevlog *
95 $ hg debugrevlog *
96 format : 1
96 format : 1
97 flags : generaldelta
97 flags : generaldelta
98
98
99 revisions : 5001
99 revisions : 5001
100 merges : 625 (12.50%)
100 merges : 625 (12.50%)
101 normal : 4376 (87.50%)
101 normal : 4376 (87.50%)
102 revisions : 5001
102 revisions : 5001
103 empty : 0 ( 0.00%)
103 empty : 0 ( 0.00%)
104 text : 0 (100.00%)
104 text : 0 (100.00%)
105 delta : 0 (100.00%)
105 delta : 0 (100.00%)
106 snapshot : 383 ( 7.66%)
106 snapshot : 383 ( 7.66%)
107 lvl-0 : 3 ( 0.06%)
107 lvl-0 : 3 ( 0.06%)
108 lvl-1 : 20 ( 0.40%)
108 lvl-1 : 20 ( 0.40%)
109 lvl-2 : 68 ( 1.36%)
109 lvl-2 : 68 ( 1.36%)
110 lvl-3 : 112 ( 2.24%)
110 lvl-3 : 112 ( 2.24%)
111 lvl-4 : 180 ( 3.60%)
111 lvl-4 : 180 ( 3.60%)
112 deltas : 4618 (92.34%)
112 deltas : 4618 (92.34%)
113 revision size : 63327412
113 revision size : 63327412
114 snapshot : 9886710 (15.61%)
114 snapshot : 9886710 (15.61%)
115 lvl-0 : 603104 ( 0.95%)
115 lvl-0 : 603104 ( 0.95%)
116 lvl-1 : 1559991 ( 2.46%)
116 lvl-1 : 1559991 ( 2.46%)
117 lvl-2 : 2295592 ( 3.62%)
117 lvl-2 : 2295592 ( 3.62%)
118 lvl-3 : 2531199 ( 4.00%)
118 lvl-3 : 2531199 ( 4.00%)
119 lvl-4 : 2896824 ( 4.57%)
119 lvl-4 : 2896824 ( 4.57%)
120 deltas : 53440702 (84.39%)
120 deltas : 53440702 (84.39%)
121
121
122 chunks : 5001
122 chunks : 5001
123 0x78 (x) : 5001 (100.00%)
123 0x78 (x) : 5001 (100.00%)
124 chunks size : 63327412
124 chunks size : 63327412
125 0x78 (x) : 63327412 (100.00%)
125 0x78 (x) : 63327412 (100.00%)
126
126
127 avg chain length : 9
127 avg chain length : 9
128 max chain length : 15
128 max chain length : 15
129 max chain reach : 28248745
129 max chain reach : 28248745
130 compression ratio : 27
130 compression ratio : 27
131
131
132 uncompressed data size (min/max/avg) : 346468 / 346472 / 346471
132 uncompressed data size (min/max/avg) : 346468 / 346472 / 346471
133 full revision size (min/max/avg) : 201008 / 201050 / 201034
133 full revision size (min/max/avg) : 201008 / 201050 / 201034
134 inter-snapshot size (min/max/avg) : 11596 / 168150 / 24430
134 inter-snapshot size (min/max/avg) : 11596 / 168150 / 24430
135 level-1 (min/max/avg) : 16653 / 168150 / 77999
135 level-1 (min/max/avg) : 16653 / 168150 / 77999
136 level-2 (min/max/avg) : 12951 / 85595 / 33758
136 level-2 (min/max/avg) : 12951 / 85595 / 33758
137 level-3 (min/max/avg) : 11608 / 43029 / 22599
137 level-3 (min/max/avg) : 11608 / 43029 / 22599
138 level-4 (min/max/avg) : 11596 / 21632 / 16093
138 level-4 (min/max/avg) : 11596 / 21632 / 16093
139 delta size (min/max/avg) : 10649 / 107163 / 11572
139 delta size (min/max/avg) : 10649 / 107163 / 11572
140
140
141 deltas against prev : 3910 (84.67%)
141 deltas against prev : 3910 (84.67%)
142 where prev = p1 : 3910 (100.00%)
142 where prev = p1 : 3910 (100.00%)
143 where prev = p2 : 0 ( 0.00%)
143 where prev = p2 : 0 ( 0.00%)
144 other : 0 ( 0.00%)
144 other : 0 ( 0.00%)
145 deltas against p1 : 648 (14.03%)
145 deltas against p1 : 648 (14.03%)
146 deltas against p2 : 60 ( 1.30%)
146 deltas against p2 : 60 ( 1.30%)
147 deltas against other : 0 ( 0.00%)
147 deltas against other : 0 ( 0.00%)
General Comments 0
You need to be logged in to leave comments. Login now