sparse-read: target density of 50% instead of 25%...
Paul Morelle
r38651:eb850471 default
@@ -1,1367 +1,1367 @@
# configitems.py - centralized declaration of configuration options
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools
import re

from . import (
    encoding,
    error,
)

def loadconfigtable(ui, extname, configtable):
    """update config items known to the ui with the extension ones"""
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            msg = "extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config='warn-config')

        knownitems.update(items)

class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
       :name: the official name within the section,
    :default: default value for this item,
      :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, the name is matched using a
              regular expression.
    """

    def __init__(self, section, name, default=None, alias=(),
                 generic=False, priority=0):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self._re = None
        if generic:
            self._re = re.compile(self.name)

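# Illustrative sketch (not part of the original file): how configitem behaves
# for plain vs. generic definitions. The section/name pairs are real
# registrations from this file, reused here only as examples.
_plain_example = configitem('annotate', 'nodates', default=False)
assert _plain_example._re is None                  # no regex for a plain item
_generic_example = configitem('color', '.*', default=None, generic=True)
assert _generic_example._re.match('color.mode')    # name is compiled as a regex
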
class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        baseitem = super(itemregister, self).get(key)
        if baseitem is not None and not baseitem.generic:
            return baseitem

        # search for a matching generic item
        generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
        for item in generics:
            # we use 'match' instead of 'search' to make the matching simpler
            # for people unfamiliar with regular expressions. Having the match
            # rooted at the start of the string produces less surprising
            # results for users writing simple regexes for sub-attributes.
            #
            # For example, using "color\..*" with match produces an
            # unsurprising result, while using search could suddenly match
            # apparently unrelated configuration that happens to contain
            # "color." anywhere. This is a tradeoff where we favor requiring
            # ".*" on some matches to avoid the need to prefix most patterns
            # with "^". The "^" seems more error-prone.
            if item._re.match(key):
                return item

        return None

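# Illustrative sketch (not part of the original file): lookup order in
# itemregister. An exact, non-generic entry wins; otherwise the generics are
# tried sorted by (priority, name), rooted at the start of the key via
# re.match as described above.
_reg_example = itemregister()
_reg_example['mode'] = configitem('color', 'mode', default='auto')
_reg_example['.*'] = configitem('color', '.*', default=None, generic=True)
assert _reg_example.get('mode').default == 'auto'   # exact hit
assert _reg_example.get('pagermode').generic        # falls through to '.*'
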
coreitems = {}

def _register(configtable, *args, **kwargs):
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = "duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item

# special value for the case where the default is derived from other values
dynamicdefault = object()

# Registering actual config items

def getitemregister(configtable):
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f

coreconfigitem = getitemregister(coreitems)

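# Illustrative sketch (not part of the original file): extensions get the same
# kind of registration function for their own table, and double registration
# is rejected. 'myext' and 'my-knob' are hypothetical names.
#
#   myconfigtable = {}
#   myconfigitem = getitemregister(myconfigtable)
#   myconfigitem('myext', 'my-knob', default=False)
#   myconfigitem('myext', 'my-knob', default=True)  # raises ProgrammingError
#
# loadconfigtable(ui, 'myext', myconfigtable) would then merge the table into
# ui._knownconfig, emitting a develwarn for any item overwriting a known one.
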
coreconfigitem('alias', '.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('annotate', 'nodates',
    default=False,
)
coreconfigitem('annotate', 'showfunc',
    default=False,
)
coreconfigitem('annotate', 'unified',
    default=None,
)
coreconfigitem('annotate', 'git',
    default=False,
)
coreconfigitem('annotate', 'ignorews',
    default=False,
)
coreconfigitem('annotate', 'ignorewsamount',
    default=False,
)
coreconfigitem('annotate', 'ignoreblanklines',
    default=False,
)
coreconfigitem('annotate', 'ignorewseol',
    default=False,
)
coreconfigitem('annotate', 'nobinary',
    default=False,
)
coreconfigitem('annotate', 'noprefix',
    default=False,
)
coreconfigitem('annotate', 'word-diff',
    default=False,
)
coreconfigitem('auth', 'cookiefile',
    default=None,
)
# bookmarks.pushing: internal hack for discovery
coreconfigitem('bookmarks', 'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem('bundle', 'mainreporoot',
    default='',
)
# bundle.reorder: experimental config
coreconfigitem('bundle', 'reorder',
    default='auto',
)
coreconfigitem('censor', 'policy',
    default='abort',
)
coreconfigitem('chgserver', 'idletimeout',
    default=3600,
)
coreconfigitem('chgserver', 'skiphash',
    default=False,
)
coreconfigitem('cmdserver', 'log',
    default=None,
)
coreconfigitem('color', '.*',
    default=None,
    generic=True,
)
coreconfigitem('color', 'mode',
    default='auto',
)
coreconfigitem('color', 'pagermode',
    default=dynamicdefault,
)
coreconfigitem('commands', 'show.aliasprefix',
    default=list,
)
coreconfigitem('commands', 'status.relative',
    default=False,
)
coreconfigitem('commands', 'status.skipstates',
    default=[],
)
coreconfigitem('commands', 'status.terse',
    default='',
)
coreconfigitem('commands', 'status.verbose',
    default=False,
)
coreconfigitem('commands', 'update.check',
    default=None,
)
coreconfigitem('commands', 'update.requiredest',
    default=False,
)
coreconfigitem('committemplate', '.*',
    default=None,
    generic=True,
)
coreconfigitem('convert', 'bzr.saverev',
    default=True,
)
coreconfigitem('convert', 'cvsps.cache',
    default=True,
)
coreconfigitem('convert', 'cvsps.fuzz',
    default=60,
)
coreconfigitem('convert', 'cvsps.logencoding',
    default=None,
)
coreconfigitem('convert', 'cvsps.mergefrom',
    default=None,
)
coreconfigitem('convert', 'cvsps.mergeto',
    default=None,
)
coreconfigitem('convert', 'git.committeractions',
    default=lambda: ['messagedifferent'],
)
coreconfigitem('convert', 'git.extrakeys',
    default=list,
)
coreconfigitem('convert', 'git.findcopiesharder',
    default=False,
)
coreconfigitem('convert', 'git.remoteprefix',
    default='remote',
)
coreconfigitem('convert', 'git.renamelimit',
    default=400,
)
coreconfigitem('convert', 'git.saverev',
    default=True,
)
coreconfigitem('convert', 'git.similarity',
    default=50,
)
coreconfigitem('convert', 'git.skipsubmodules',
    default=False,
)
coreconfigitem('convert', 'hg.clonebranches',
    default=False,
)
coreconfigitem('convert', 'hg.ignoreerrors',
    default=False,
)
coreconfigitem('convert', 'hg.revs',
    default=None,
)
coreconfigitem('convert', 'hg.saverev',
    default=False,
)
coreconfigitem('convert', 'hg.sourcename',
    default=None,
)
coreconfigitem('convert', 'hg.startrev',
    default=None,
)
coreconfigitem('convert', 'hg.tagsbranch',
    default='default',
)
coreconfigitem('convert', 'hg.usebranchnames',
    default=True,
)
coreconfigitem('convert', 'ignoreancestorcheck',
    default=False,
)
coreconfigitem('convert', 'localtimezone',
    default=False,
)
coreconfigitem('convert', 'p4.encoding',
    default=dynamicdefault,
)
coreconfigitem('convert', 'p4.startrev',
    default=0,
)
coreconfigitem('convert', 'skiptags',
    default=False,
)
coreconfigitem('convert', 'svn.debugsvnlog',
    default=True,
)
coreconfigitem('convert', 'svn.trunk',
    default=None,
)
coreconfigitem('convert', 'svn.tags',
    default=None,
)
coreconfigitem('convert', 'svn.branches',
    default=None,
)
coreconfigitem('convert', 'svn.startrev',
    default=0,
)
coreconfigitem('debug', 'dirstate.delaywrite',
    default=0,
)
coreconfigitem('defaults', '.*',
    default=None,
    generic=True,
)
coreconfigitem('devel', 'all-warnings',
    default=False,
)
coreconfigitem('devel', 'bundle2.debug',
    default=False,
)
coreconfigitem('devel', 'cache-vfs',
    default=None,
)
coreconfigitem('devel', 'check-locks',
    default=False,
)
coreconfigitem('devel', 'check-relroot',
    default=False,
)
coreconfigitem('devel', 'default-date',
    default=None,
)
coreconfigitem('devel', 'deprec-warn',
    default=False,
)
coreconfigitem('devel', 'disableloaddefaultcerts',
    default=False,
)
coreconfigitem('devel', 'warn-empty-changegroup',
    default=False,
)
coreconfigitem('devel', 'legacy.exchange',
    default=list,
)
coreconfigitem('devel', 'servercafile',
    default='',
)
coreconfigitem('devel', 'serverexactprotocol',
    default='',
)
coreconfigitem('devel', 'serverrequirecert',
    default=False,
)
coreconfigitem('devel', 'strip-obsmarkers',
    default=True,
)
coreconfigitem('devel', 'warn-config',
    default=None,
)
coreconfigitem('devel', 'warn-config-default',
    default=None,
)
coreconfigitem('devel', 'user.obsmarker',
    default=None,
)
coreconfigitem('devel', 'warn-config-unknown',
    default=None,
)
coreconfigitem('devel', 'debug.peer-request',
    default=False,
)
coreconfigitem('diff', 'nodates',
    default=False,
)
coreconfigitem('diff', 'showfunc',
    default=False,
)
coreconfigitem('diff', 'unified',
    default=None,
)
coreconfigitem('diff', 'git',
    default=False,
)
coreconfigitem('diff', 'ignorews',
    default=False,
)
coreconfigitem('diff', 'ignorewsamount',
    default=False,
)
coreconfigitem('diff', 'ignoreblanklines',
    default=False,
)
coreconfigitem('diff', 'ignorewseol',
    default=False,
)
coreconfigitem('diff', 'nobinary',
    default=False,
)
coreconfigitem('diff', 'noprefix',
    default=False,
)
coreconfigitem('diff', 'word-diff',
    default=False,
)
coreconfigitem('email', 'bcc',
    default=None,
)
coreconfigitem('email', 'cc',
    default=None,
)
coreconfigitem('email', 'charsets',
    default=list,
)
coreconfigitem('email', 'from',
    default=None,
)
coreconfigitem('email', 'method',
    default='smtp',
)
coreconfigitem('email', 'reply-to',
    default=None,
)
coreconfigitem('email', 'to',
    default=None,
)
coreconfigitem('experimental', 'archivemetatemplate',
    default=dynamicdefault,
)
coreconfigitem('experimental', 'bundle-phases',
    default=False,
)
coreconfigitem('experimental', 'bundle2-advertise',
    default=True,
)
coreconfigitem('experimental', 'bundle2-output-capture',
    default=False,
)
coreconfigitem('experimental', 'bundle2.pushback',
    default=False,
)
coreconfigitem('experimental', 'bundle2.stream',
    default=False,
)
coreconfigitem('experimental', 'bundle2lazylocking',
    default=False,
)
coreconfigitem('experimental', 'bundlecomplevel',
    default=None,
)
coreconfigitem('experimental', 'bundlecomplevel.bzip2',
    default=None,
)
coreconfigitem('experimental', 'bundlecomplevel.gzip',
    default=None,
)
coreconfigitem('experimental', 'bundlecomplevel.none',
    default=None,
)
coreconfigitem('experimental', 'bundlecomplevel.zstd',
    default=None,
)
coreconfigitem('experimental', 'changegroup3',
    default=False,
)
coreconfigitem('experimental', 'clientcompressionengines',
    default=list,
)
coreconfigitem('experimental', 'copytrace',
    default='on',
)
coreconfigitem('experimental', 'copytrace.movecandidateslimit',
    default=100,
)
coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
    default=100,
)
coreconfigitem('experimental', 'crecordtest',
    default=None,
)
coreconfigitem('experimental', 'directaccess',
    default=False,
)
coreconfigitem('experimental', 'directaccess.revnums',
    default=False,
)
coreconfigitem('experimental', 'editortmpinhg',
    default=False,
)
coreconfigitem('experimental', 'evolution',
    default=list,
)
coreconfigitem('experimental', 'evolution.allowdivergence',
    default=False,
    alias=[('experimental', 'allowdivergence')]
)
coreconfigitem('experimental', 'evolution.allowunstable',
    default=None,
)
coreconfigitem('experimental', 'evolution.createmarkers',
    default=None,
)
coreconfigitem('experimental', 'evolution.effect-flags',
    default=True,
    alias=[('experimental', 'effect-flags')]
)
coreconfigitem('experimental', 'evolution.exchange',
    default=None,
)
coreconfigitem('experimental', 'evolution.bundle-obsmarker',
    default=False,
)
coreconfigitem('experimental', 'evolution.report-instabilities',
    default=True,
)
coreconfigitem('experimental', 'evolution.track-operation',
    default=True,
)
coreconfigitem('experimental', 'maxdeltachainspan',
    default=-1,
)
coreconfigitem('experimental', 'mergetempdirprefix',
    default=None,
)
coreconfigitem('experimental', 'mmapindexthreshold',
    default=None,
)
coreconfigitem('experimental', 'nonnormalparanoidcheck',
    default=False,
)
coreconfigitem('experimental', 'exportableenviron',
    default=list,
)
coreconfigitem('experimental', 'extendedheader.index',
    default=None,
)
coreconfigitem('experimental', 'extendedheader.similarity',
    default=False,
)
coreconfigitem('experimental', 'format.compression',
    default='zlib',
)
coreconfigitem('experimental', 'graphshorten',
    default=False,
)
coreconfigitem('experimental', 'graphstyle.parent',
    default=dynamicdefault,
)
coreconfigitem('experimental', 'graphstyle.missing',
    default=dynamicdefault,
)
coreconfigitem('experimental', 'graphstyle.grandparent',
    default=dynamicdefault,
)
coreconfigitem('experimental', 'hook-track-tags',
    default=False,
)
coreconfigitem('experimental', 'httppeer.advertise-v2',
    default=False,
)
coreconfigitem('experimental', 'httppostargs',
    default=False,
)
coreconfigitem('experimental', 'mergedriver',
    default=None,
)
coreconfigitem('experimental', 'nointerrupt', default=False)
coreconfigitem('experimental', 'nointerrupt-interactiveonly', default=True)

coreconfigitem('experimental', 'obsmarkers-exchange-debug',
    default=False,
)
coreconfigitem('experimental', 'remotenames',
    default=False,
)
coreconfigitem('experimental', 'removeemptydirs',
    default=True,
)
coreconfigitem('experimental', 'revlogv2',
    default=None,
)
coreconfigitem('experimental', 'single-head-per-branch',
    default=False,
)
coreconfigitem('experimental', 'sshserver.support-v2',
    default=False,
)
coreconfigitem('experimental', 'spacemovesdown',
    default=False,
)
coreconfigitem('experimental', 'sparse-read',
    default=False,
)
coreconfigitem('experimental', 'sparse-read.density-threshold',
-    default=0.25,
+    default=0.50,
)
coreconfigitem('experimental', 'sparse-read.min-gap-size',
    default='256K',
)
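
# Illustrative sketch (not part of the original file, and the semantics are an
# assumption based on the option names): this changeset raises the sparse-read
# density target from 25% to 50%. Treating "density" as the ratio of bytes the
# reader actually needs to the total byte span it would read in one go, a span
# is dense enough to read whole only when that ratio meets the threshold;
# otherwise it is split at gaps larger than sparse-read.min-gap-size.
_needed, _span = 512 * 1024, 1024 * 1024
_density = _needed / float(_span)   # 0.5: exactly at the new 50% target
assert _density >= 0.50             # would have cleared the old 0.25 easily
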
coreconfigitem('experimental', 'treemanifest',
    default=False,
)
coreconfigitem('experimental', 'update.atomic-file',
    default=False,
)
coreconfigitem('experimental', 'sshpeer.advertise-v2',
    default=False,
)
coreconfigitem('experimental', 'web.apiserver',
    default=False,
)
coreconfigitem('experimental', 'web.api.http-v2',
    default=False,
)
coreconfigitem('experimental', 'web.api.debugreflect',
    default=False,
)
coreconfigitem('experimental', 'xdiff',
    default=False,
)
coreconfigitem('extensions', '.*',
    default=None,
    generic=True,
)
coreconfigitem('extdata', '.*',
    default=None,
    generic=True,
)
coreconfigitem('format', 'aggressivemergedeltas',
    default=True,
)
coreconfigitem('format', 'chunkcachesize',
    default=None,
)
coreconfigitem('format', 'dotencode',
    default=True,
)
coreconfigitem('format', 'generaldelta',
    default=False,
)
coreconfigitem('format', 'manifestcachesize',
    default=None,
)
coreconfigitem('format', 'maxchainlen',
    default=None,
)
coreconfigitem('format', 'obsstore-version',
    default=None,
)
coreconfigitem('format', 'usefncache',
    default=True,
)
coreconfigitem('format', 'usegeneraldelta',
    default=True,
)
coreconfigitem('format', 'usestore',
    default=True,
)
coreconfigitem('fsmonitor', 'warn_when_unused',
    default=True,
)
coreconfigitem('fsmonitor', 'warn_update_file_count',
    default=50000,
)
coreconfigitem('hooks', '.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('hgweb-paths', '.*',
    default=list,
    generic=True,
)
coreconfigitem('hostfingerprints', '.*',
    default=list,
    generic=True,
)
coreconfigitem('hostsecurity', 'ciphers',
    default=None,
)
coreconfigitem('hostsecurity', 'disabletls10warning',
    default=False,
)
coreconfigitem('hostsecurity', 'minimumprotocol',
    default=dynamicdefault,
)
coreconfigitem('hostsecurity', '.*:minimumprotocol$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('hostsecurity', '.*:ciphers$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('hostsecurity', '.*:fingerprints$',
    default=list,
    generic=True,
)
coreconfigitem('hostsecurity', '.*:verifycertsfile$',
    default=None,
    generic=True,
)

coreconfigitem('http_proxy', 'always',
    default=False,
)
coreconfigitem('http_proxy', 'host',
    default=None,
)
coreconfigitem('http_proxy', 'no',
    default=list,
)
coreconfigitem('http_proxy', 'passwd',
    default=None,
)
coreconfigitem('http_proxy', 'user',
    default=None,
)
coreconfigitem('logtoprocess', 'commandexception',
    default=None,
)
coreconfigitem('logtoprocess', 'commandfinish',
    default=None,
)
coreconfigitem('logtoprocess', 'command',
    default=None,
)
coreconfigitem('logtoprocess', 'develwarn',
    default=None,
)
coreconfigitem('logtoprocess', 'uiblocked',
    default=None,
)
coreconfigitem('merge', 'checkunknown',
    default='abort',
)
coreconfigitem('merge', 'checkignored',
    default='abort',
)
coreconfigitem('experimental', 'merge.checkpathconflicts',
    default=False,
)
coreconfigitem('merge', 'followcopies',
    default=True,
)
coreconfigitem('merge', 'on-failure',
    default='continue',
)
coreconfigitem('merge', 'preferancestor',
    default=lambda: ['*'],
)
coreconfigitem('merge-tools', '.*',
    default=None,
    generic=True,
)
coreconfigitem('merge-tools', br'.*\.args$',
    default="$local $base $other",
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.binary$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.check$',
    default=list,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.checkchanged$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.executable$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.fixeol$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.gui$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.mergemarkers$',
    default='basic',
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
    default=dynamicdefault,  # take from ui.mergemarkertemplate
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.priority$',
    default=0,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.premerge$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.symlink$',
    default=False,
    generic=True,
    priority=-1,
)
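
# Illustrative sketch (not part of the original file): why the patterns above
# carry priority=-1. itemregister.get sorts generics by (priority, name)
# ascending and returns the first regex match, so these specific patterns are
# consulted before the catch-all 'merge-tools', '.*' item (priority 0).
# 'kdiff3' stands in for any configured merge tool name.
_mt_example = itemregister()
_mt_example['.*'] = configitem('merge-tools', '.*', default=None, generic=True)
_mt_example[br'.*\.args$'] = configitem('merge-tools', br'.*\.args$',
    default="$local $base $other", generic=True, priority=-1)
assert _mt_example.get(b'kdiff3.args').default == "$local $base $other"
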
coreconfigitem('pager', 'attend-.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('pager', 'ignore',
    default=list,
)
coreconfigitem('pager', 'pager',
    default=dynamicdefault,
)
coreconfigitem('patch', 'eol',
    default='strict',
)
coreconfigitem('patch', 'fuzz',
    default=2,
)
coreconfigitem('paths', 'default',
    default=None,
)
coreconfigitem('paths', 'default-push',
    default=None,
)
coreconfigitem('paths', '.*',
    default=None,
    generic=True,
)
coreconfigitem('phases', 'checksubrepos',
    default='follow',
)
coreconfigitem('phases', 'new-commit',
    default='draft',
)
coreconfigitem('phases', 'publish',
    default=True,
)
coreconfigitem('profiling', 'enabled',
    default=False,
)
coreconfigitem('profiling', 'format',
    default='text',
)
coreconfigitem('profiling', 'freq',
    default=1000,
)
coreconfigitem('profiling', 'limit',
    default=30,
)
coreconfigitem('profiling', 'nested',
    default=0,
)
coreconfigitem('profiling', 'output',
    default=None,
)
coreconfigitem('profiling', 'showmax',
    default=0.999,
)
coreconfigitem('profiling', 'showmin',
    default=dynamicdefault,
)
coreconfigitem('profiling', 'sort',
    default='inlinetime',
)
coreconfigitem('profiling', 'statformat',
    default='hotpath',
)
coreconfigitem('profiling', 'time-track',
    default='cpu',
)
coreconfigitem('profiling', 'type',
    default='stat',
)
coreconfigitem('progress', 'assume-tty',
    default=False,
)
coreconfigitem('progress', 'changedelay',
    default=1,
)
coreconfigitem('progress', 'clear-complete',
    default=True,
)
coreconfigitem('progress', 'debug',
    default=False,
)
coreconfigitem('progress', 'delay',
    default=3,
)
coreconfigitem('progress', 'disable',
    default=False,
)
coreconfigitem('progress', 'estimateinterval',
    default=60.0,
)
coreconfigitem('progress', 'format',
    default=lambda: ['topic', 'bar', 'number', 'estimate'],
)
coreconfigitem('progress', 'refresh',
    default=0.1,
)
coreconfigitem('progress', 'width',
    default=dynamicdefault,
)
coreconfigitem('push', 'pushvars.server',
    default=False,
)
coreconfigitem('server', 'bookmarks-pushkey-compat',
    default=True,
)
coreconfigitem('server', 'bundle1',
    default=True,
)
coreconfigitem('server', 'bundle1gd',
    default=None,
)
coreconfigitem('server', 'bundle1.pull',
    default=None,
)
coreconfigitem('server', 'bundle1gd.pull',
    default=None,
)
coreconfigitem('server', 'bundle1.push',
    default=None,
)
coreconfigitem('server', 'bundle1gd.push',
    default=None,
)
coreconfigitem('server', 'compressionengines',
    default=list,
)
coreconfigitem('server', 'concurrent-push-mode',
    default='strict',
)
coreconfigitem('server', 'disablefullbundle',
    default=False,
)
coreconfigitem('server', 'maxhttpheaderlen',
    default=1024,
)
coreconfigitem('server', 'pullbundle',
    default=False,
)
coreconfigitem('server', 'preferuncompressed',
    default=False,
)
coreconfigitem('server', 'streamunbundle',
    default=False,
)
coreconfigitem('server', 'uncompressed',
    default=True,
)
coreconfigitem('server', 'uncompressedallowsecret',
    default=False,
)
coreconfigitem('server', 'validate',
    default=False,
)
coreconfigitem('server', 'zliblevel',
    default=-1,
)
coreconfigitem('server', 'zstdlevel',
    default=3,
)
coreconfigitem('share', 'pool',
    default=None,
)
coreconfigitem('share', 'poolnaming',
    default='identity',
)
coreconfigitem('smtp', 'host',
    default=None,
)
coreconfigitem('smtp', 'local_hostname',
    default=None,
)
coreconfigitem('smtp', 'password',
    default=None,
)
coreconfigitem('smtp', 'port',
    default=dynamicdefault,
)
coreconfigitem('smtp', 'tls',
    default='none',
)
coreconfigitem('smtp', 'username',
    default=None,
)
coreconfigitem('sparse', 'missingwarning',
    default=True,
)
coreconfigitem('subrepos', 'allowed',
    default=dynamicdefault,  # to make backporting simpler
)
coreconfigitem('subrepos', 'hg:allowed',
    default=dynamicdefault,
)
coreconfigitem('subrepos', 'git:allowed',
    default=dynamicdefault,
)
coreconfigitem('subrepos', 'svn:allowed',
    default=dynamicdefault,
)
coreconfigitem('templates', '.*',
    default=None,
    generic=True,
)
coreconfigitem('trusted', 'groups',
    default=list,
)
coreconfigitem('trusted', 'users',
    default=list,
)
coreconfigitem('ui', '_usedassubrepo',
    default=False,
)
coreconfigitem('ui', 'allowemptycommit',
    default=False,
)
coreconfigitem('ui', 'archivemeta',
    default=True,
)
coreconfigitem('ui', 'askusername',
    default=False,
)
coreconfigitem('ui', 'clonebundlefallback',
    default=False,
)
coreconfigitem('ui', 'clonebundleprefers',
    default=list,
)
coreconfigitem('ui', 'clonebundles',
    default=True,
)
coreconfigitem('ui', 'color',
    default='auto',
)
coreconfigitem('ui', 'commitsubrepos',
    default=False,
)
coreconfigitem('ui', 'debug',
    default=False,
)
coreconfigitem('ui', 'debugger',
    default=None,
)
coreconfigitem('ui', 'editor',
    default=dynamicdefault,
)
coreconfigitem('ui', 'fallbackencoding',
    default=None,
)
coreconfigitem('ui', 'forcecwd',
    default=None,
)
coreconfigitem('ui', 'forcemerge',
    default=None,
)
coreconfigitem('ui', 'formatdebug',
    default=False,
)
coreconfigitem('ui', 'formatjson',
    default=False,
)
coreconfigitem('ui', 'formatted',
    default=None,
)
coreconfigitem('ui', 'graphnodetemplate',
    default=None,
)
coreconfigitem('ui', 'interactive',
    default=None,
)
coreconfigitem('ui', 'interface',
    default=None,
)
coreconfigitem('ui', 'interface.chunkselector',
    default=None,
)
coreconfigitem('ui', 'large-file-limit',
    default=10000000,
)
coreconfigitem('ui', 'logblockedtimes',
    default=False,
)
coreconfigitem('ui', 'logtemplate',
    default=None,
)
coreconfigitem('ui', 'merge',
    default=None,
)
coreconfigitem('ui', 'mergemarkers',
    default='basic',
)
coreconfigitem('ui', 'mergemarkertemplate',
    default=('{node|short} '
             '{ifeq(tags, "tip", "", '
             'ifeq(tags, "", "", "{tags} "))}'
             '{if(bookmarks, "{bookmarks} ")}'
             '{ifeq(branch, "default", "", "{branch} ")}'
             '- {author|user}: {desc|firstline}')
)
coreconfigitem('ui', 'nontty',
    default=False,
)
coreconfigitem('ui', 'origbackuppath',
    default=None,
)
coreconfigitem('ui', 'paginate',
    default=True,
)
coreconfigitem('ui', 'patch',
    default=None,
)
coreconfigitem('ui', 'portablefilenames',
    default='warn',
)
coreconfigitem('ui', 'promptecho',
    default=False,
)
coreconfigitem('ui', 'quiet',
    default=False,
)
coreconfigitem('ui', 'quietbookmarkmove',
    default=False,
)
coreconfigitem('ui', 'remotecmd',
    default='hg',
)
coreconfigitem('ui', 'report_untrusted',
    default=True,
)
coreconfigitem('ui', 'rollback',
    default=True,
)
coreconfigitem('ui', 'signal-safe-lock',
    default=True,
)
coreconfigitem('ui', 'slash',
    default=False,
)
coreconfigitem('ui', 'ssh',
    default='ssh',
)
coreconfigitem('ui', 'ssherrorhint',
    default=None,
)
coreconfigitem('ui', 'statuscopies',
    default=False,
)
coreconfigitem('ui', 'strict',
    default=False,
)
coreconfigitem('ui', 'style',
    default='',
)
coreconfigitem('ui', 'supportcontact',
    default=None,
)
coreconfigitem('ui', 'textwidth',
    default=78,
)
coreconfigitem('ui', 'timeout',
    default='600',
)
coreconfigitem('ui', 'timeout.warn',
    default=0,
)
coreconfigitem('ui', 'traceback',
    default=False,
)
coreconfigitem('ui', 'tweakdefaults',
    default=False,
)
coreconfigitem('ui', 'username',
    alias=[('ui', 'user')]
)
coreconfigitem('ui', 'verbose',
    default=False,
)
coreconfigitem('verify', 'skipflags',
    default=None,
)
coreconfigitem('web', 'allowbz2',
    default=False,
)
coreconfigitem('web', 'allowgz',
    default=False,
)
coreconfigitem('web', 'allow-pull',
    alias=[('web', 'allowpull')],
1205 default=True,
1205 default=True,
1206 )
1206 )
1207 coreconfigitem('web', 'allow-push',
1207 coreconfigitem('web', 'allow-push',
1208 alias=[('web', 'allow_push')],
1208 alias=[('web', 'allow_push')],
1209 default=list,
1209 default=list,
1210 )
1210 )
1211 coreconfigitem('web', 'allowzip',
1211 coreconfigitem('web', 'allowzip',
1212 default=False,
1212 default=False,
1213 )
1213 )
1214 coreconfigitem('web', 'archivesubrepos',
1214 coreconfigitem('web', 'archivesubrepos',
1215 default=False,
1215 default=False,
1216 )
1216 )
1217 coreconfigitem('web', 'cache',
1217 coreconfigitem('web', 'cache',
1218 default=True,
1218 default=True,
1219 )
1219 )
1220 coreconfigitem('web', 'contact',
1220 coreconfigitem('web', 'contact',
1221 default=None,
1221 default=None,
1222 )
1222 )
1223 coreconfigitem('web', 'deny_push',
1223 coreconfigitem('web', 'deny_push',
1224 default=list,
1224 default=list,
1225 )
1225 )
1226 coreconfigitem('web', 'guessmime',
1226 coreconfigitem('web', 'guessmime',
1227 default=False,
1227 default=False,
1228 )
1228 )
1229 coreconfigitem('web', 'hidden',
1229 coreconfigitem('web', 'hidden',
1230 default=False,
1230 default=False,
1231 )
1231 )
1232 coreconfigitem('web', 'labels',
1232 coreconfigitem('web', 'labels',
1233 default=list,
1233 default=list,
1234 )
1234 )
1235 coreconfigitem('web', 'logoimg',
1235 coreconfigitem('web', 'logoimg',
1236 default='hglogo.png',
1236 default='hglogo.png',
1237 )
1237 )
1238 coreconfigitem('web', 'logourl',
1238 coreconfigitem('web', 'logourl',
1239 default='https://mercurial-scm.org/',
1239 default='https://mercurial-scm.org/',
1240 )
1240 )
1241 coreconfigitem('web', 'accesslog',
1241 coreconfigitem('web', 'accesslog',
1242 default='-',
1242 default='-',
1243 )
1243 )
1244 coreconfigitem('web', 'address',
1244 coreconfigitem('web', 'address',
1245 default='',
1245 default='',
1246 )
1246 )
1247 coreconfigitem('web', 'allow-archive',
1247 coreconfigitem('web', 'allow-archive',
1248 alias=[('web', 'allow_archive')],
1248 alias=[('web', 'allow_archive')],
1249 default=list,
1249 default=list,
1250 )
1250 )
1251 coreconfigitem('web', 'allow_read',
1251 coreconfigitem('web', 'allow_read',
1252 default=list,
1252 default=list,
1253 )
1253 )
1254 coreconfigitem('web', 'baseurl',
1254 coreconfigitem('web', 'baseurl',
1255 default=None,
1255 default=None,
1256 )
1256 )
1257 coreconfigitem('web', 'cacerts',
1257 coreconfigitem('web', 'cacerts',
1258 default=None,
1258 default=None,
1259 )
1259 )
1260 coreconfigitem('web', 'certificate',
1260 coreconfigitem('web', 'certificate',
1261 default=None,
1261 default=None,
1262 )
1262 )
1263 coreconfigitem('web', 'collapse',
1263 coreconfigitem('web', 'collapse',
1264 default=False,
1264 default=False,
1265 )
1265 )
1266 coreconfigitem('web', 'csp',
1266 coreconfigitem('web', 'csp',
1267 default=None,
1267 default=None,
1268 )
1268 )
1269 coreconfigitem('web', 'deny_read',
1269 coreconfigitem('web', 'deny_read',
1270 default=list,
1270 default=list,
1271 )
1271 )
1272 coreconfigitem('web', 'descend',
1272 coreconfigitem('web', 'descend',
1273 default=True,
1273 default=True,
1274 )
1274 )
1275 coreconfigitem('web', 'description',
1275 coreconfigitem('web', 'description',
1276 default="",
1276 default="",
1277 )
1277 )
1278 coreconfigitem('web', 'encoding',
1278 coreconfigitem('web', 'encoding',
1279 default=lambda: encoding.encoding,
1279 default=lambda: encoding.encoding,
1280 )
1280 )
1281 coreconfigitem('web', 'errorlog',
1281 coreconfigitem('web', 'errorlog',
1282 default='-',
1282 default='-',
1283 )
1283 )
1284 coreconfigitem('web', 'ipv6',
1284 coreconfigitem('web', 'ipv6',
1285 default=False,
1285 default=False,
1286 )
1286 )
1287 coreconfigitem('web', 'maxchanges',
1287 coreconfigitem('web', 'maxchanges',
1288 default=10,
1288 default=10,
1289 )
1289 )
1290 coreconfigitem('web', 'maxfiles',
1290 coreconfigitem('web', 'maxfiles',
1291 default=10,
1291 default=10,
1292 )
1292 )
1293 coreconfigitem('web', 'maxshortchanges',
1293 coreconfigitem('web', 'maxshortchanges',
1294 default=60,
1294 default=60,
1295 )
1295 )
1296 coreconfigitem('web', 'motd',
1296 coreconfigitem('web', 'motd',
1297 default='',
1297 default='',
1298 )
1298 )
1299 coreconfigitem('web', 'name',
1299 coreconfigitem('web', 'name',
1300 default=dynamicdefault,
1300 default=dynamicdefault,
1301 )
1301 )
1302 coreconfigitem('web', 'port',
1302 coreconfigitem('web', 'port',
1303 default=8000,
1303 default=8000,
1304 )
1304 )
1305 coreconfigitem('web', 'prefix',
1305 coreconfigitem('web', 'prefix',
1306 default='',
1306 default='',
1307 )
1307 )
1308 coreconfigitem('web', 'push_ssl',
1308 coreconfigitem('web', 'push_ssl',
1309 default=True,
1309 default=True,
1310 )
1310 )
1311 coreconfigitem('web', 'refreshinterval',
1311 coreconfigitem('web', 'refreshinterval',
1312 default=20,
1312 default=20,
1313 )
1313 )
1314 coreconfigitem('web', 'server-header',
1314 coreconfigitem('web', 'server-header',
1315 default=None,
1315 default=None,
1316 )
1316 )
1317 coreconfigitem('web', 'staticurl',
1317 coreconfigitem('web', 'staticurl',
1318 default=None,
1318 default=None,
1319 )
1319 )
1320 coreconfigitem('web', 'stripes',
1320 coreconfigitem('web', 'stripes',
1321 default=1,
1321 default=1,
1322 )
1322 )
1323 coreconfigitem('web', 'style',
1323 coreconfigitem('web', 'style',
1324 default='paper',
1324 default='paper',
1325 )
1325 )
1326 coreconfigitem('web', 'templates',
1326 coreconfigitem('web', 'templates',
1327 default=None,
1327 default=None,
1328 )
1328 )
1329 coreconfigitem('web', 'view',
1329 coreconfigitem('web', 'view',
1330 default='served',
1330 default='served',
1331 )
1331 )
1332 coreconfigitem('worker', 'backgroundclose',
1332 coreconfigitem('worker', 'backgroundclose',
1333 default=dynamicdefault,
1333 default=dynamicdefault,
1334 )
1334 )
1335 # Windows defaults to a limit of 512 open files. A buffer of 128
1335 # Windows defaults to a limit of 512 open files. A buffer of 128
1336 # should give us enough headway.
1336 # should give us enough headway.
1337 coreconfigitem('worker', 'backgroundclosemaxqueue',
1337 coreconfigitem('worker', 'backgroundclosemaxqueue',
1338 default=384,
1338 default=384,
1339 )
1339 )
1340 coreconfigitem('worker', 'backgroundcloseminfilecount',
1340 coreconfigitem('worker', 'backgroundcloseminfilecount',
1341 default=2048,
1341 default=2048,
1342 )
1342 )
1343 coreconfigitem('worker', 'backgroundclosethreadcount',
1343 coreconfigitem('worker', 'backgroundclosethreadcount',
1344 default=4,
1344 default=4,
1345 )
1345 )
1346 coreconfigitem('worker', 'enabled',
1346 coreconfigitem('worker', 'enabled',
1347 default=True,
1347 default=True,
1348 )
1348 )
1349 coreconfigitem('worker', 'numcpus',
1349 coreconfigitem('worker', 'numcpus',
1350 default=None,
1350 default=None,
1351 )
1351 )
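
# Illustrative usage (not part of this file): items declared above are read
# back through the standard ui accessors, which fall back to these defaults,
# e.g. repo.ui.configbool('worker', 'enabled') or
# repo.ui.configint('web', 'port').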

# Rebase-related configuration was moved to core because other extensions do
# strange things with it. For example, shelve imports the rebase extension to
# reuse some bits without formally loading it.
coreconfigitem('commands', 'rebase.requiredest',
    default=False,
)
coreconfigitem('experimental', 'rebaseskipobsolete',
    default=True,
)
coreconfigitem('rebase', 'singletransaction',
    default=False,
)
coreconfigitem('rebase', 'experimental.inmemory',
    default=False,
)
@@ -1,2621 +1,2621
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

from __future__ import absolute_import

import collections
import contextlib
import errno
import hashlib
import heapq
import os
import re
import struct
import zlib

# import stuff from node for others to import from revlog
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    wdirfilenodeids,
    wdirhex,
    wdirid,
    wdirrev,
)
from .i18n import _
from .thirdparty import (
    attr,
)
from . import (
    ancestor,
    error,
    mdiff,
    policy,
    pycompat,
    templatefilters,
    util,
)
from .utils import (
    stringutil,
)

parsers = policy.importmod(r'parsers')

# Aliased for performance.
_zlibdecompress = zlib.decompress

# revlog header flags
REVLOGV0 = 0
REVLOGV1 = 1
# Dummy value until file format is finalized.
# Reminder: change the bounds check in revlog.__init__ when this is changed.
REVLOGV2 = 0xDEAD
FLAG_INLINE_DATA = (1 << 16)
FLAG_GENERALDELTA = (1 << 17)
REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
REVLOG_DEFAULT_FORMAT = REVLOGV1
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
REVLOGV2_FLAGS = REVLOGV1_FLAGS

# revlog index flags
REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
REVIDX_DEFAULT_FLAGS = 0
# stable order in which flags need to be processed and their processors applied
REVIDX_FLAGS_ORDER = [
    REVIDX_ISCENSORED,
    REVIDX_ELLIPSIS,
    REVIDX_EXTSTORED,
]
REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
# bitmask of flags that could cause rawdata content change
REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

RevlogError = error.RevlogError
LookupError = error.LookupError
CensoredNodeError = error.CensoredNodeError
ProgrammingError = error.ProgrammingError

# Store flag processors (cf. 'addflagprocessor()' to register)
_flagprocessors = {
    REVIDX_ISCENSORED: None,
}

_mdre = re.compile('\1\n')
def parsemeta(text):
    """return (metadatadict, metadatasize)"""
    # text can be buffer, so we can't use .startswith or .index
    if text[:2] != '\1\n':
        return None, None
    s = _mdre.search(text, 2).start()
    mtext = text[2:s]
    meta = {}
    for l in mtext.splitlines():
        k, v = l.split(": ", 1)
        meta[k] = v
    return meta, (s + 2)

def packmeta(meta, text):
    keys = sorted(meta)
    metatext = "".join("%s: %s\n" % (k, meta[k]) for k in keys)
    return "\1\n%s\1\n%s" % (metatext, text)

def _censoredtext(text):
    m, offs = parsemeta(text)
    return m and "censored" in m

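# Illustrative round-trip (doctest-style, hypothetical values): metadata is
# framed between two "\1\n" markers ahead of the revision text, so packmeta
# and parsemeta invert each other:
#
#     >>> meta, size = parsemeta(packmeta({'copy': 'a'}, 'text'))
#     >>> meta, size
#     ({'copy': 'a'}, 12)
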
def addflagprocessor(flag, processor):
    """Register a flag processor on a revision data flag.

    Invariant:
    - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
      and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
    - Only one flag processor can be registered on a specific flag.
    - flagprocessors must be 3-tuples of functions (read, write, raw) with the
      following signatures:
          - (read)  f(self, rawtext) -> text, bool
          - (write) f(self, text) -> rawtext, bool
          - (raw)   f(self, rawtext) -> bool
    "text" is presented to the user. "rawtext" is stored in revlog data, not
    directly visible to the user.
    The boolean returned by these transforms is used to determine whether
    the returned text can be used for hash integrity checking. For example,
    if "write" returns False, then "text" is used to generate hash. If
    "write" returns True, that basically means "rawtext" returned by "write"
    should be used to generate hash. Usually, "write" and "read" return
    different booleans. And "raw" returns the same boolean as "write".

    Note: The 'raw' transform is used for changegroup generation and in some
    debug commands. In this case the transform only indicates whether the
    contents can be used for hash integrity checks.
    """
    if not flag & REVIDX_KNOWN_FLAGS:
        msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
        raise ProgrammingError(msg)
    if flag not in REVIDX_FLAGS_ORDER:
        msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
        raise ProgrammingError(msg)
    if flag in _flagprocessors:
        msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
        raise error.Abort(msg)
    _flagprocessors[flag] = processor

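# Illustrative registration (hypothetical extension code, not part of this
# module): wiring a pass-through processor onto the external-store flag would
# look roughly like:
#
#     def _extread(self, rawtext):
#         return rawtext, True
#     def _extwrite(self, text):
#         return text, True
#     def _extraw(self, rawtext):
#         return True
#     addflagprocessor(REVIDX_EXTSTORED, (_extread, _extwrite, _extraw))
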
def getoffset(q):
    return int(q >> 16)

def gettype(q):
    return int(q & 0xFFFF)

def offset_type(offset, type):
    if (type & ~REVIDX_KNOWN_FLAGS) != 0:
        raise ValueError('unknown revlog index flags')
    return int(int(offset) << 16 | type)

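# Illustrative check (not part of the module): the index packs the data
# offset into the high bits and a 16-bit flag field into the low bits of a
# single integer, so getoffset/gettype invert offset_type:
#
#     >>> q = offset_type(1048576, REVIDX_ISCENSORED)
#     >>> (getoffset(q), gettype(q)) == (1048576, REVIDX_ISCENSORED)
#     True
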
_nullhash = hashlib.sha1(nullid)

def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # As of now, if one of the parent nodes is null, it is p2
    if p2 == nullid:
        # deep copy of a hash is faster than creating one
        s = _nullhash.copy()
        s.update(p1)
    else:
        # none of the parent nodes are nullid
        if p1 < p2:
            a = p1
            b = p2
        else:
            a = p2
            b = p1
        s = hashlib.sha1(a)
        s.update(b)
    s.update(text)
    return s.digest()

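# Illustrative example (hypothetical values): for a root revision both
# parents are nullid, so the node hash reduces to
# sha1(nullid + nullid + text):
#
#     >>> hash(b'data', nullid, nullid) == \
#     ...     hashlib.sha1(nullid * 2 + b'data').digest()
#     True
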
def _trimchunk(revlog, revs, startidx, endidx=None):
    """returns revs[startidx:endidx] without empty trailing revs
    """
    length = revlog.length

    if endidx is None:
        endidx = len(revs)

    # Trim empty revs at the end, but never the very first revision of a chain
    while endidx > 1 and endidx > startidx and length(revs[endidx - 1]) == 0:
        endidx -= 1

    return revs[startidx:endidx]

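# Illustrative behaviour (hypothetical revision lengths): with five revs of
# stored sizes [10, 0, 5, 0, 0], _trimchunk(revlog, revs, 0) drops only the
# trailing empty revisions and returns the first three revs; the empty rev
# at index 1 is kept because it sits inside the span.
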
def _slicechunk(revlog, revs):
    """slice revs to reduce the amount of unrelated data to be read from disk.

    ``revs`` is sliced into groups that should be read in one go.
    Assume that revs are sorted.
    """
    start = revlog.start
    length = revlog.length

    if len(revs) <= 1:
        yield revs
        return

    startbyte = start(revs[0])
    endbyte = start(revs[-1]) + length(revs[-1])
    readdata = deltachainspan = endbyte - startbyte

    chainpayload = sum(length(r) for r in revs)

    if deltachainspan:
        density = chainpayload / float(deltachainspan)
    else:
        density = 1.0

    # Store the gaps in a heap to have them sorted by decreasing size
    gapsheap = []
    heapq.heapify(gapsheap)
    prevend = None
    for i, rev in enumerate(revs):
        revstart = start(rev)
        revlen = length(rev)

        # Skip empty revisions to form larger holes
        if revlen == 0:
            continue

        if prevend is not None:
            gapsize = revstart - prevend
            # only consider holes that are large enough
            if gapsize > revlog._srmingapsize:
                heapq.heappush(gapsheap, (-gapsize, i))

        prevend = revstart + revlen

    # Collect the indices of the largest holes until the density is acceptable
    indicesheap = []
    heapq.heapify(indicesheap)
    while gapsheap and density < revlog._srdensitythreshold:
        oppgapsize, gapidx = heapq.heappop(gapsheap)

        heapq.heappush(indicesheap, gapidx)

        # the gap sizes are stored as negatives to be sorted decreasingly
        # by the heap
        readdata -= (-oppgapsize)
        if readdata > 0:
            density = chainpayload / float(readdata)
        else:
            density = 1.0

    # Cut the revs at collected indices
    previdx = 0
    while indicesheap:
        idx = heapq.heappop(indicesheap)

        chunk = _trimchunk(revlog, revs, previdx, idx)
        if chunk:
            yield chunk

        previdx = idx

    chunk = _trimchunk(revlog, revs, previdx)
    if chunk:
        yield chunk

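# Illustrative sketch (hypothetical helper, not part of revlog): the density
# test driving the loop above. A span is read in one go once the delta
# payload covers at least the configured share of the bytes fetched,
# matching the 0.50 default set in revlog.__init__ below.
def _example_denseenough(chainpayload, readdata, threshold=0.50):
    density = chainpayload / float(readdata) if readdata > 0 else 1.0
    return density >= threshold

# e.g. 600 KiB of deltas inside a 1 MiB read window has density ~0.59 and
# passes; 200 KiB (~0.20) would keep the hole-collection loop running.
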
@attr.s(slots=True, frozen=True)
class _deltainfo(object):
    distance = attr.ib()
    deltalen = attr.ib()
    data = attr.ib()
    base = attr.ib()
    chainbase = attr.ib()
    chainlen = attr.ib()
    compresseddeltalen = attr.ib()

class _deltacomputer(object):
    def __init__(self, revlog):
        self.revlog = revlog

    def _getcandidaterevs(self, p1, p2, cachedelta):
        """
        Yields groups of candidate revisions worth diffing against, ordered
        from the cheapest to the most expensive to try.
        """
        revlog = self.revlog
        gdelta = revlog._generaldelta
        curr = len(revlog)
        prev = curr - 1
        p1r, p2r = revlog.rev(p1), revlog.rev(p2)

        # should we try to build a delta?
        if prev != nullrev and revlog.storedeltachains:
            tested = set()
            # This condition is true most of the time when processing
            # changegroup data into a generaldelta repo. The only time it
            # isn't true is if this is the first revision in a delta chain
            # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
            if cachedelta and gdelta and revlog._lazydeltabase:
                # Assume what we received from the server is a good choice;
                # build delta will reuse the cache
                yield (cachedelta[0],)
                tested.add(cachedelta[0])

            if gdelta:
                # exclude already lazily tested base if any
                parents = [p for p in (p1r, p2r)
                           if p != nullrev and p not in tested]

                if not revlog._aggressivemergedeltas and len(parents) == 2:
                    parents.sort()
                    # To minimize the chance of having to build a fulltext,
                    # pick first whichever parent is closest to us (max rev)
                    yield (parents[1],)
                    # then the other one (min rev) if the first did not fit
                    yield (parents[0],)
                    tested.update(parents)
                elif len(parents) > 0:
                    # Test all parents (1 or 2), and keep the best candidate
                    yield parents
                    tested.update(parents)

            if prev not in tested:
                # the other approaches failed; try against prev to hopefully
                # save us a fulltext.
                yield (prev,)
                tested.add(prev)

    def buildtext(self, revinfo, fh):
        """Builds a fulltext version of a revision

        revinfo: _revisioninfo instance that contains all needed info
        fh:      file handle to either the .i or the .d revlog file,
                 depending on whether it is inlined or not
        """
        btext = revinfo.btext
        if btext[0] is not None:
            return btext[0]

        revlog = self.revlog
        cachedelta = revinfo.cachedelta
        flags = revinfo.flags
        node = revinfo.node

        baserev = cachedelta[0]
        delta = cachedelta[1]
        # special case deltas which replace entire base; no need to decode
        # base revision. this neatly avoids censored bases, which throw when
        # they're decoded.
        hlen = struct.calcsize(">lll")
        if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev),
                                                   len(delta) - hlen):
            btext[0] = delta[hlen:]
        else:
            # deltabase is rawtext before changed by flag processors, which is
            # equivalent to non-raw text
            basetext = revlog.revision(baserev, _df=fh, raw=False)
            btext[0] = mdiff.patch(basetext, delta)

        try:
            res = revlog._processflags(btext[0], flags, 'read', raw=True)
            btext[0], validatehash = res
            if validatehash:
                revlog.checkhash(btext[0], node, p1=revinfo.p1, p2=revinfo.p2)
            if flags & REVIDX_ISCENSORED:
                raise RevlogError(_('node %s is not censored') % node)
        except CensoredNodeError:
            # must pass the censored index flag to add censored revisions
            if not flags & REVIDX_ISCENSORED:
                raise
        return btext[0]

    def _builddeltadiff(self, base, revinfo, fh):
        revlog = self.revlog
        t = self.buildtext(revinfo, fh)
        if revlog.iscensored(base):
            # deltas based on a censored revision must replace the
            # full content in one patch, so delta works everywhere
            header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
            delta = header + t
        else:
            ptext = revlog.revision(base, _df=fh, raw=True)
            delta = mdiff.textdiff(ptext, t)

        return delta

    def _builddeltainfo(self, revinfo, base, fh):
        # can we use the cached delta?
        if revinfo.cachedelta and revinfo.cachedelta[0] == base:
            delta = revinfo.cachedelta[1]
        else:
            delta = self._builddeltadiff(base, revinfo, fh)
        revlog = self.revlog
        header, data = revlog.compress(delta)
        deltalen = len(header) + len(data)
        chainbase = revlog.chainbase(base)
        offset = revlog.end(len(revlog) - 1)
        dist = deltalen + offset - revlog.start(chainbase)
        if revlog._generaldelta:
            deltabase = base
        else:
            deltabase = chainbase
        chainlen, compresseddeltalen = revlog._chaininfo(base)
        chainlen += 1
        compresseddeltalen += deltalen
        return _deltainfo(dist, deltalen, (header, data), deltabase,
                          chainbase, chainlen, compresseddeltalen)

    def finddeltainfo(self, revinfo, fh):
        """Find an acceptable delta against a candidate revision

        revinfo: information about the revision (instance of _revisioninfo)
        fh:      file handle to either the .i or the .d revlog file,
                 depending on whether it is inlined or not

        Returns the first acceptable candidate revision, as ordered by
        _getcandidaterevs
        """
        cachedelta = revinfo.cachedelta
        p1 = revinfo.p1
        p2 = revinfo.p2
        revlog = self.revlog

        deltainfo = None
        for candidaterevs in self._getcandidaterevs(p1, p2, cachedelta):
            nominateddeltas = []
            for candidaterev in candidaterevs:
                # no delta for rawtext-changing revs (see "candelta" for why)
                if revlog.flags(candidaterev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
                    continue
                candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
                if revlog._isgooddeltainfo(candidatedelta, revinfo):
                    nominateddeltas.append(candidatedelta)
            if nominateddeltas:
                deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
                break

        return deltainfo

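# Illustrative ordering (hypothetical revs): for a merge revision with
# parents at revs 10 and 20, no cached delta, and aggressive merge deltas
# disabled, _getcandidaterevs yields roughly:
#
#     (20,)    # closest parent first (max rev)
#     (10,)    # then the other parent
#     (prev,)  # finally the previous revision, if not already tested
#
# finddeltainfo stops at the first group producing an acceptable delta and
# keeps the smallest delta within that group.
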
@attr.s(slots=True, frozen=True)
class _revisioninfo(object):
    """Information about a revision that allows building its fulltext
    node:       expected hash of the revision
    p1, p2:     parent revs of the revision
    btext:      built text cache consisting of a one-element list
    cachedelta: (baserev, uncompressed_delta) or None
    flags:      flags associated to the revision storage

    One of btext[0] or cachedelta must be set.
    """
    node = attr.ib()
    p1 = attr.ib()
    p2 = attr.ib()
    btext = attr.ib()
    textlen = attr.ib()
    cachedelta = attr.ib()
    flags = attr.ib()

# index v0:
#  4 bytes: offset
#  4 bytes: compressed length
#  4 bytes: base rev
#  4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
indexformatv0 = struct.Struct(">4l20s20s20s")
indexformatv0_pack = indexformatv0.pack
indexformatv0_unpack = indexformatv0.unpack

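# Sanity check (illustrative): the v0 layout above is four 4-byte longs plus
# three 20-byte nodeids, i.e. a fixed 76 bytes per entry:
#
#     >>> indexformatv0.size
#     76
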
class revlogoldio(object):
    def __init__(self):
        self.size = indexformatv0.size

    def parseindex(self, data, inline):
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = indexformatv0_unpack(cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        # add the magic null revision at -1
        index.append((0, 0, 0, -1, -1, -1, -1, nullid))

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise RevlogError(_('index entry flags need revlog version 1'))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return indexformatv0_pack(*e2)

# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = struct.Struct(">Qiiiiii20s12x")
indexformatng_pack = indexformatng.pack
versionformat = struct.Struct(">I")
versionformat_pack = versionformat.pack
versionformat_unpack = versionformat.unpack

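# Sanity check (illustrative): one "ng" entry packs to a fixed 64 bytes:
# 8 (offset/flags) + 6 * 4 (lengths and revs) + 20 (nodeid) + 12 padding:
#
#     >>> indexformatng.size
#     64
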
# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7fffffff

class revlogio(object):
    def __init__(self):
        self.size = indexformatng.size

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, getattr(index, 'nodemap', None), cache

    def packentry(self, entry, node, version, rev):
        p = indexformatng_pack(*entry)
        if rev == 0:
            p = versionformat_pack(version) + p[4:]
        return p

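# Illustrative note: for rev 0 the first 4 bytes of the packed entry are
# replaced with the version/flags word, so a default v1 inline revlog starts
# with versionformat_pack(REVLOG_DEFAULT_VERSION), i.e. the bytes
# '\x00\x01\x00\x01'.
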
class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.

    If checkambig, indexfile is opened with checkambig=True at
    writing, to avoid file stat ambiguity.

    If mmaplargeindex is True, and an mmapindexthreshold is set, the
    index will be mmapped rather than read if it is larger than the
    configured threshold.

    If censorable is True, the revlog can have censored revisions.
    """
    def __init__(self, opener, indexfile, datafile=None, checkambig=False,
                 mmaplargeindex=False, censorable=False):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = datafile or (indexfile[:-2] + ".d")
        self.opener = opener
        # When True, indexfile is opened with checkambig=True at writing, to
        # avoid file stat ambiguity.
        self._checkambig = checkambig
        self._censorable = censorable
        # 3-tuple of (node, rev, text) for a raw revision.
        self._cache = None
        # Maps rev to chain base rev.
        self._chainbasecache = util.lrucachedict(100)
        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
        self._chunkcache = (0, '')
        # How much data to read and cache into the raw revlog data cache.
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self._aggressivemergedeltas = True
        self.index = []
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}
        # Mapping of revision integer to full node.
        self._nodecache = {nullid: nullrev}
        self._nodepos = None
        self._compengine = 'zlib'
        self._maxdeltachainspan = -1
        self._withsparseread = False
        self._srdensitythreshold = 0.50
629 self._srmingapsize = 262144
629 self._srmingapsize = 262144
630
630
631 mmapindexthreshold = None
631 mmapindexthreshold = None
632 v = REVLOG_DEFAULT_VERSION
632 v = REVLOG_DEFAULT_VERSION
633 opts = getattr(opener, 'options', None)
633 opts = getattr(opener, 'options', None)
634 if opts is not None:
634 if opts is not None:
635 if 'revlogv2' in opts:
635 if 'revlogv2' in opts:
636 # version 2 revlogs always use generaldelta.
636 # version 2 revlogs always use generaldelta.
637 v = REVLOGV2 | FLAG_GENERALDELTA | FLAG_INLINE_DATA
637 v = REVLOGV2 | FLAG_GENERALDELTA | FLAG_INLINE_DATA
638 elif 'revlogv1' in opts:
638 elif 'revlogv1' in opts:
639 if 'generaldelta' in opts:
639 if 'generaldelta' in opts:
640 v |= FLAG_GENERALDELTA
640 v |= FLAG_GENERALDELTA
641 else:
641 else:
642 v = 0
642 v = 0
643 if 'chunkcachesize' in opts:
643 if 'chunkcachesize' in opts:
644 self._chunkcachesize = opts['chunkcachesize']
644 self._chunkcachesize = opts['chunkcachesize']
645 if 'maxchainlen' in opts:
645 if 'maxchainlen' in opts:
646 self._maxchainlen = opts['maxchainlen']
646 self._maxchainlen = opts['maxchainlen']
647 if 'aggressivemergedeltas' in opts:
647 if 'aggressivemergedeltas' in opts:
648 self._aggressivemergedeltas = opts['aggressivemergedeltas']
648 self._aggressivemergedeltas = opts['aggressivemergedeltas']
649 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
649 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
650 if 'compengine' in opts:
650 if 'compengine' in opts:
651 self._compengine = opts['compengine']
651 self._compengine = opts['compengine']
652 if 'maxdeltachainspan' in opts:
652 if 'maxdeltachainspan' in opts:
653 self._maxdeltachainspan = opts['maxdeltachainspan']
653 self._maxdeltachainspan = opts['maxdeltachainspan']
654 if mmaplargeindex and 'mmapindexthreshold' in opts:
654 if mmaplargeindex and 'mmapindexthreshold' in opts:
655 mmapindexthreshold = opts['mmapindexthreshold']
655 mmapindexthreshold = opts['mmapindexthreshold']
656 self._withsparseread = bool(opts.get('with-sparse-read', False))
656 self._withsparseread = bool(opts.get('with-sparse-read', False))
657 if 'sparse-read-density-threshold' in opts:
657 if 'sparse-read-density-threshold' in opts:
658 self._srdensitythreshold = opts['sparse-read-density-threshold']
658 self._srdensitythreshold = opts['sparse-read-density-threshold']
659 if 'sparse-read-min-gap-size' in opts:
659 if 'sparse-read-min-gap-size' in opts:
660 self._srmingapsize = opts['sparse-read-min-gap-size']
660 self._srmingapsize = opts['sparse-read-min-gap-size']
661
661
662 if self._chunkcachesize <= 0:
662 if self._chunkcachesize <= 0:
663 raise RevlogError(_('revlog chunk cache size %r is not greater '
663 raise RevlogError(_('revlog chunk cache size %r is not greater '
664 'than 0') % self._chunkcachesize)
664 'than 0') % self._chunkcachesize)
665 elif self._chunkcachesize & (self._chunkcachesize - 1):
665 elif self._chunkcachesize & (self._chunkcachesize - 1):
666 raise RevlogError(_('revlog chunk cache size %r is not a power '
666 raise RevlogError(_('revlog chunk cache size %r is not a power '
667 'of 2') % self._chunkcachesize)
667 'of 2') % self._chunkcachesize)
668
668
669 indexdata = ''
669 indexdata = ''
670 self._initempty = True
670 self._initempty = True
671 try:
671 try:
672 with self._indexfp() as f:
672 with self._indexfp() as f:
673 if (mmapindexthreshold is not None and
673 if (mmapindexthreshold is not None and
674 self.opener.fstat(f).st_size >= mmapindexthreshold):
674 self.opener.fstat(f).st_size >= mmapindexthreshold):
675 indexdata = util.buffer(util.mmapread(f))
675 indexdata = util.buffer(util.mmapread(f))
676 else:
676 else:
677 indexdata = f.read()
677 indexdata = f.read()
678 if len(indexdata) > 0:
678 if len(indexdata) > 0:
679 v = versionformat_unpack(indexdata[:4])[0]
679 v = versionformat_unpack(indexdata[:4])[0]
680 self._initempty = False
680 self._initempty = False
681 except IOError as inst:
681 except IOError as inst:
682 if inst.errno != errno.ENOENT:
682 if inst.errno != errno.ENOENT:
683 raise
683 raise
684
684
685 self.version = v
685 self.version = v
686 self._inline = v & FLAG_INLINE_DATA
686 self._inline = v & FLAG_INLINE_DATA
687 self._generaldelta = v & FLAG_GENERALDELTA
687 self._generaldelta = v & FLAG_GENERALDELTA
688 flags = v & ~0xFFFF
688 flags = v & ~0xFFFF
689 fmt = v & 0xFFFF
689 fmt = v & 0xFFFF
        if fmt == REVLOGV0:
            if flags:
                raise RevlogError(_('unknown flags (%#04x) in version %d '
                                    'revlog %s') %
                                  (flags >> 16, fmt, self.indexfile))
        elif fmt == REVLOGV1:
            if flags & ~REVLOGV1_FLAGS:
                raise RevlogError(_('unknown flags (%#04x) in version %d '
                                    'revlog %s') %
                                  (flags >> 16, fmt, self.indexfile))
        elif fmt == REVLOGV2:
            if flags & ~REVLOGV2_FLAGS:
                raise RevlogError(_('unknown flags (%#04x) in version %d '
                                    'revlog %s') %
                                  (flags >> 16, fmt, self.indexfile))
        else:
            raise RevlogError(_('unknown version (%d) in revlog %s') %
                              (fmt, self.indexfile))

        self.storedeltachains = True

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(indexdata, self._inline)
        except (ValueError, IndexError):
            raise RevlogError(_("index %s is corrupted") % (self.indexfile))
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = {}
        # revlog header -> revlog compressor
        self._decompressors = {}

    @util.propertycache
    def _compressor(self):
        return util.compengines[self._compengine].revlogcompressor()

    def _indexfp(self, mode='r'):
        """file object for the revlog's index file"""
        args = {r'mode': mode}
        if mode != 'r':
            args[r'checkambig'] = self._checkambig
        if mode == 'w':
            args[r'atomictemp'] = True
        return self.opener(self.indexfile, **args)

    def _datafp(self, mode='r'):
        """file object for the revlog's data file"""
        return self.opener(self.datafile, mode=mode)

    @contextlib.contextmanager
    def _datareadfp(self, existingfp=None):
        """file object suitable to read data"""
        if existingfp is not None:
            yield existingfp
        else:
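            # Inline revlogs interleave revision data with the index in a
            # single '.i' file, so data reads must go through the index
            # file handle; non-inline revlogs keep data in a separate
            # '.d' file.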
            if self._inline:
                func = self._indexfp
            else:
                func = self._datafp
            with func() as fp:
                yield fp

    def tip(self):
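        # The parsed index carries a trailing sentinel entry for nullid,
        # so the tip is the entry just before it and __len__ below
        # reports len(self.index) - 1 real revisions.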
        return self.node(len(self.index) - 2)
    def __contains__(self, rev):
        return 0 <= rev < len(self)
    def __len__(self):
        return len(self.index) - 1
    def __iter__(self):
        return iter(xrange(len(self)))
    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        step = 1
        if stop is not None:
            if start > stop:
                step = -1
            stop += step
        else:
            stop = len(self)
        return xrange(start, stop, step)

    @util.propertycache
    def nodemap(self):
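        # Looking up the first node primes self._nodecache as a side
        # effect (see rev() below), after which the cache itself can be
        # handed out.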
        self.rev(self.node(0))
        return self._nodecache

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def candelta(self, baserev, rev):
        """whether two revisions (baserev, rev) can be delta-ed or not"""
        # Disable delta if either rev requires a content-changing flag
        # processor (ex. LFS). This is because such a flag processor can
        # alter the rawtext content that the delta will be based on, and
        # two clients could have the same revlog node with different flags
        # (i.e. different rawtext contents) and the delta could be
        # incompatible.
        if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
            or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
            return False
        return True

    def clearcaches(self):
        self._cache = None
        self._chainbasecache.clear()
        self._chunkcache = (0, '')
        self._pcache = {}

        try:
            self._nodecache.clearcaches()
        except AttributeError:
            self._nodecache = {nullid: nullrev}
            self._nodepos = None

    def rev(self, node):
        try:
            return self._nodecache[node]
        except TypeError:
            raise
        except RevlogError:
            # parsers.c radix tree lookup failed
            if node == wdirid or node in wdirfilenodeids:
                raise error.WdirUnsupported
            raise LookupError(node, self.indexfile, _('no node'))
        except KeyError:
            # pure python cache lookup failed
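            # Scan the index backwards from the last scanned position,
            # memoizing every node seen along the way, so repeated lookups
            # amortize the cost of the walk.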
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                p = len(i) - 2
            else:
                assert p < len(i)
            for r in xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            if node == wdirid or node in wdirfilenodeids:
                raise error.WdirUnsupported
            raise LookupError(node, self.indexfile, _('no node'))

    # Accessors for index entries.

    # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
    # are flags.
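    # For reference, a parsed index entry is the 8-tuple:
    #   (offset_and_flags, compressed_length, raw_length, delta_base_rev,
    #    linkrev, parent_1_rev, parent_2_rev, node_hash)
    # which the accessors below unpack by position.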
    def start(self, rev):
        return int(self.index[rev][0] >> 16)

    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF

    def length(self, rev):
        return self.index[rev][1]

    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.revision(rev, raw=True)
        return len(t)

    def size(self, rev):
        """length of non-raw text (processed by a "read" flag processor)"""
        # fast path: if no "read" flag processor could change the content,
        # size is rawsize. note: ELLIPSIS is known to not change the content.
        flags = self.flags(rev)
        if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
            return self.rawsize(rev)

        return len(self.revision(rev, raw=False))

    def chainbase(self, rev):
        base = self._chainbasecache.get(rev)
        if base is not None:
            return base

        index = self.index
        iterrev = rev
        base = index[iterrev][3]
        while base != iterrev:
            iterrev = base
            base = index[iterrev][3]

        self._chainbasecache[rev] = base
        return base

    def linkrev(self, rev):
        return self.index[rev][4]

    def parentrevs(self, rev):
        try:
            entry = self.index[rev]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

        return entry[5], entry[6]

    def node(self, rev):
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    # Derived from index values.

    def end(self, rev):
        return self.start(rev) + self.length(rev)

    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline

    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
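        # Walk the delta chain towards its base. With generaldelta each
        # entry records its own delta parent (e[3]); without it, every
        # revision deltas against the revision immediately before it.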
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r

    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped

    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse topological order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
                                      inclusive=inclusive)

    def descendants(self, revs):
        """Generate the descendants of 'revs' in revision order.

        Yield a sequence of revision numbers starting with a child of
        some rev in revs, i.e., each revision is *not* considered a
        descendant of itself. Results are ordered by revision number (a
        topological sort)."""
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
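        # A single forward sweep suffices because a revision's parents
        # always have smaller revision numbers than the revision itself.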
        for i in self.revs(start=first + 1):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if r not in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]

    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        return ancestor.incrementalmissingancestors(self.parentrevs, common)

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev: # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants. (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

    def headrevs(self):
        try:
            return self.index.headrevs()
        except AttributeError:
            return self._headrevs()

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iterate over filtered revs, so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1 # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
        return [r for r, val in enumerate(ishead) if val]

    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = {startrev}
        heads = {startrev}

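        # Sweep forward once over the remaining revs: a rev becomes
        # reachable when one of its parents is reachable and it is not a
        # stop rev; a parent that gains a reachable child loses its head
        # status.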
        parentrevs = self.parentrevs
        for r in self.revs(start=startrev + 1):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

        return [self.node(r) for r in heads]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def descendant(self, start, end):
        """True if revision 'end' is a descendant of revision 'start'

        A revision is considered a descendant of itself."""
        if start == nullrev:
            return True
        elif start == end:
            return True
        return start in self._commonancestorsheads(start, end)

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        ancs = self._commonancestorsheads(a, b)
        return pycompat.maplist(self.node, ancs)

    def _commonancestorsheads(self, *revs):
        """calculate all the heads of the common ancestors of revs"""
        try:
            ancs = self.index.commonancestorsheads(*revs)
        except (AttributeError, OverflowError): # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
        return ancs

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        The implementation of this is trivial but the use of
        commonancestorsheads is not."""
        a, b = self.rev(a), self.rev(b)
        return self.descendant(a, b)

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if "%d" % rev != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass

    def _partialmatch(self, id):
        # we don't care about wdirfilenodeids as they should always be
        # full hashes
        maybewdir = wdirhex.startswith(id)
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise LookupError(id, self.indexfile,
                                  _('ambiguous identifier'))
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
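                # e.g. id='abc' gives l=1 and prefix=bin('ab'); candidates
                # matching the even-length binary prefix are then re-checked
                # against the full (possibly odd-length) hex string below.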
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id) and
                      self.hasnode(n)]
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise LookupError(id, self.indexfile, _('no match found'))

    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""
        def isvalid(prefix):
            try:
                node = self._partialmatch(prefix)
            except error.RevlogError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if node is None:
                raise LookupError(node, self.indexfile, _('no node'))
            return True

        def maybewdir(prefix):
            return all(c == 'f' for c in prefix)

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except RevlogError:
                if node != wdirid:
                    raise LookupError(node, self.indexfile, _('no node'))
            except AttributeError:
                # Fall through to pure code
                pass

        if node == wdirid:
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        for length in range(minlength, 41):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def _cachesegment(self, offset, data):
        """Add a segment to the revlog cache.

        Accepts an absolute offset and the data that is at that location.
        """
        o, d = self._chunkcache
        # try to add to existing cache
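        # Extend the cached window only when the new segment starts exactly
        # where the cached data ends and the combined size stays below
        # _chunksize; otherwise restart the cache at the new offset.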
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.
        """
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                      - realoffset)
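        # Masking with ~(cachesize - 1) relies on cachesize being a power
        # of two (validated in __init__). For example, with a 64 KiB cache,
        # offset=70000 and length=100 round to realoffset=65536 and
        # reallength=65536: a single aligned window.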
1605 with self._datareadfp(df) as df:
1605 with self._datareadfp(df) as df:
1606 df.seek(realoffset)
1606 df.seek(realoffset)
1607 d = df.read(reallength)
1607 d = df.read(reallength)
1608 self._cachesegment(realoffset, d)
1608 self._cachesegment(realoffset, d)
1609 if offset != realoffset or reallength != length:
1609 if offset != realoffset or reallength != length:
1610 return util.buffer(d, offset - realoffset, length)
1610 return util.buffer(d, offset - realoffset, length)
1611 return d
1611 return d
1612
1612
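    # Editor's sketch (not part of revlog): the power-of-two window rounding
    # used by _readsegment above. The helper is hypothetical and only mirrors
    # the two lines of arithmetic in the method.
    @staticmethod
    def _demowindow(offset, length, cachesize=65536):
        """Return the (realoffset, reallength) window _readsegment would read.

        A 1000-byte request at offset 65000 crosses a 64KiB boundary, so two
        full windows get read:

        >>> revlog._demowindow(65000, 1000)
        (0, 131072)
        """
        realoffset = offset & ~(cachesize - 1)
        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                      - realoffset)
        return realoffset, reallength
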
    def _getsegment(self, offset, length, df=None):
        """Obtain a segment of raw data from the revlog.

        Accepts an absolute offset, length of bytes to obtain, and an
        optional file handle to the already-opened revlog. If the file
        handle is used, its original seek position will not be preserved.

        Requests for data may be returned from a cache.

        Returns a str or a buffer instance of raw byte data.
        """
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._readsegment(offset, length, df=df)

    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is used, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start

        return start, self._getsegment(start, length, df=df)

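    # Editor's sketch (not part of revlog): the physical position of a
    # revision's data in an *inline* revlog, mirroring the adjustment made
    # above. In an inline revlog, index entries and data chunks share the
    # .i file, so the (rev + 1) index entries written so far push the data
    # right. ``iosize`` is the packed size of one index entry.
    @staticmethod
    def _demoinlineoffset(logicalstart, rev, iosize=64):
        # logical offset within the data stream + the interleaved entries
        return logicalstart + (rev + 1) * iosize
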
    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])

    def _chunks(self, revs, df=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        if not self._withsparseread:
            slicedchunks = (revs,)
        else:
            slicedchunks = _slicechunk(self, revs)

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            for rev in revschunk:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

        return l

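    # Editor's sketch (not Mercurial code): the density criterion behind
    # sparse reads. A slice of revisions is worth one contiguous read only
    # when the bytes we actually want cover enough of the span on disk;
    # ``targetdensity`` here is an assumed knob, not the real config option.
    @staticmethod
    def _demodensity(chunks, targetdensity=0.50):
        """``chunks`` is a sorted list of (offset, length) pairs.

        Two 4KiB chunks separated by a 1MiB gap are far too sparse:

        >>> revlog._demodensity([(0, 4096), (1 << 20, 4096)])
        False
        """
        span = chunks[-1][0] + chunks[-1][1] - chunks[0][0]
        wanted = sum(length for _offset, length in chunks)
        return wanted / float(span) >= targetdensity
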
    def _chunkclear(self):
        """Clear the raw chunk cache."""
        self._chunkcache = (0, '')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

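    # Editor's note (illustrative, not Mercurial code): without generaldelta
    # every delta is implicitly stored against the previous revision on disk
    # (rev - 1); with generaldelta the index records an explicit base, so for
    # example revision 5 can delta against its parent at revision 2 instead
    # of against revision 4.
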
    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._chunk(rev2))

        return mdiff.textdiff(self.revision(rev1, raw=True),
                              self.revision(rev2, raw=True))

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        raw - an optional argument specifying if the revision data is to be
        treated as raw data when applying flag transforms. 'raw' should be set
        to True when generating changegroups or in debug commands.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        cachedrev = None
        flags = None
        rawtext = None
        if node == nullid:
            return ""
        if self._cache:
            if self._cache[0] == node:
                # _cache only stores rawtext
                if raw:
                    return self._cache[2]
                # duplicated, but good for perf
                if rev is None:
                    rev = self.rev(node)
                if flags is None:
                    flags = self.flags(rev)
                # no extra flags set, no flag processor runs, text = rawtext
                if flags == REVIDX_DEFAULT_FLAGS:
                    return self._cache[2]
                # rawtext is reusable. need to run flag processor
                rawtext = self._cache[2]

            cachedrev = self._cache[1]

        # look up what we need to read
        if rawtext is None:
            if rev is None:
                rev = self.rev(node)

            chain, stopped = self._deltachain(rev, stoprev=cachedrev)
            if stopped:
                rawtext = self._cache[2]

            # drop cache to save memory
            self._cache = None

            bins = self._chunks(chain, df=_df)
            if rawtext is None:
                rawtext = bytes(bins[0])
                bins = bins[1:]

            rawtext = mdiff.patches(rawtext, bins)
            self._cache = (node, rev, rawtext)

        if flags is None:
            if rev is None:
                rev = self.rev(node)
            flags = self.flags(rev)

        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)

        return text

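    # Editor's sketch of the read path above (hypothetical usage; ``rl`` is
    # an assumed revlog instance): reconstruction is one full snapshot plus
    # a chain of patches.
    #
    #   chain, stopped = rl._deltachain(rev)   # [snapshot rev, ..., rev]
    #   bins = rl._chunks(chain)               # decompressed chunk per rev
    #   text = mdiff.patches(bytes(bins[0]), bins[1:])
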
    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return hash(text, p1, p2)

    def _processflags(self, text, flags, operation, raw=False):
        """Inspect revision data flags and apply transforms defined by
        registered flag processors.

        ``text`` - the revision data to process
        ``flags`` - the revision flags
        ``operation`` - the operation being performed (read or write)
        ``raw`` - an optional argument describing if the raw transform should be
        applied.

        This method processes the flags in the order (or reverse order if
        ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
        flag processors registered for present flags. The order of flags defined
        in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.

        Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
        processed text and ``validatehash`` is a bool indicating whether the
        returned text should be checked for hash integrity.

        Note: If the ``raw`` argument is set, it has precedence over the
        operation and will only update the value of ``validatehash``.
        """
        # fast path: no flag processors will run
        if flags == 0:
            return text, True
        if operation not in ('read', 'write'):
            raise ProgrammingError(_("invalid '%s' operation") % (operation))
        # Check all flags are known.
        if flags & ~REVIDX_KNOWN_FLAGS:
            raise RevlogError(_("incompatible revision flag '%#x'") %
                              (flags & ~REVIDX_KNOWN_FLAGS))
        validatehash = True
        # Depending on the operation (read or write), the order might be
        # reversed due to non-commutative transforms.
        orderedflags = REVIDX_FLAGS_ORDER
        if operation == 'write':
            orderedflags = reversed(orderedflags)

        for flag in orderedflags:
            # If a flagprocessor has been registered for a known flag, apply the
            # related operation transform and update result tuple.
            if flag & flags:
                vhash = True

                if flag not in _flagprocessors:
                    message = _("missing processor for flag '%#x'") % (flag)
                    raise RevlogError(message)

                processor = _flagprocessors[flag]
                if processor is not None:
                    readtransform, writetransform, rawtransform = processor

                    if raw:
                        vhash = rawtransform(self, text)
                    elif operation == 'read':
                        text, vhash = readtransform(self, text)
                    else: # write operation
                        text, vhash = writetransform(self, text)
                    validatehash = validatehash and vhash

        return text, validatehash

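    # Editor's sketch (hypothetical flag and transforms): a processor is
    # registered through the module-level addflagprocessor() hook as a
    # 3-tuple of (read, write, raw) callables. read/write return
    # (newtext, validatehash); raw returns only the validatehash bool.
    #
    #   def _readexample(rl, text):
    #       return text.replace('a', 'b'), True
    #   def _writeexample(rl, text):
    #       return text.replace('b', 'a'), True
    #   def _rawexample(rl, text):
    #       return False    # stored rawtext hash is not directly checkable
    #
    #   addflagprocessor(REVIDX_EXAMPLEFLAG,
    #                    (_readexample, _writeexample, _rawexample))
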
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        try:
            if p1 is None and p2 is None:
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                revornode = rev
                if revornode is None:
                    revornode = templatefilters.short(hex(node))
                raise RevlogError(_("integrity check failed on %s:%s")
                                  % (self.indexfile, pycompat.bytestr(revornode)))
        except RevlogError:
            if self._censorable and _censoredtext(text):
                raise error.CensoredNodeError(self.indexfile, node, text)
            raise

    def _enforceinlinesize(self, tr, fp=None):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        if trindex is not None:
            dataoff = self.start(trindex)
        else:
            # revlog was stripped at start of transaction, use all leftover data
            trindex = len(self) - 1
            dataoff = self.end(-2)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()

        with self._datafp('w') as df:
            for r in self:
                df.write(self._getsegmentforrevs(r, r)[1])

        with self._indexfp('w') as fp:
            self.version &= ~FLAG_INLINE_DATA
            self._inline = False
            io = self._io
            for i in self:
                e = io.packentry(self.index[i], self.node, self.version, i)
                fp.write(e)

            # the temp file replaces the real index when we exit the context
            # manager

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
                    node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
        computed by default as hash(text, p1, p2), however subclasses might
        use a different hashing method (and override checkhash() in that case)
        flags - the known flags to set on the revision
        deltacomputer - an optional _deltacomputer instance shared between
        multiple calls
        """
        if link == nullrev:
            raise RevlogError(_("attempted to add linkrev -1 to %s")
                              % self.indexfile)

        if flags:
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = self._processflags(text, flags, 'write')

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise RevlogError(
                _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
                % (self.indexfile, len(rawtext)))

        node = node or self.hash(rawtext, p1, p2)
        if node in self.nodemap:
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
                                   flags, cachedelta=cachedelta,
                                   deltacomputer=deltacomputer)

    def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
                       cachedelta=None, deltacomputer=None):
        """add a raw revision with known flags, node and parents
        useful when reusing a revision not stored in this revlog (ex: received
        over wire, or read from an external bundle).
        """
        dfh = None
        if not self._inline:
            dfh = self._datafp("a+")
        ifh = self._indexfp("a+")
        try:
            return self._addrevision(node, rawtext, transaction, link, p1, p2,
                                     flags, cachedelta, ifh, dfh,
                                     deltacomputer=deltacomputer)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def compress(self, data):
        """Generate a possibly-compressed representation of data."""
        if not data:
            return '', data

        compressed = self._compressor.compress(data)

        if compressed:
            # The revlog compressor added the header in the returned data.
            return '', compressed

        if data[0:1] == '\0':
            return '', data
        return 'u', data

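    # Editor's note (illustrative values): the (header, data) pairs compress()
    # can return. An empty header means the payload identifies itself -- a
    # compression engine header such as zlib's 'x', or a leading '\0' that
    # decompress() passes through untouched -- while 'u' explicitly marks
    # uncompressed data:
    #
    #   ('', 'x\x9c...')      zlib-compressed, engine header in the payload
    #   ('', '\0...')         stored raw; '\0' doubles as the type byte
    #   ('u', 'plain text')   stored raw behind an explicit 'u' marker
    #   ('', '')              empty revision
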
    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely be at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        if t == 'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise RevlogError(_('revlog decompress error: %s') %
                                  stringutil.forcebytestr(e))
        # '\0' is more common than 'u' so it goes first.
        elif t == '\0':
            return data
        elif t == 'u':
            return util.buffer(data, 1)

        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor()
                self._decompressors[t] = compressor
            except KeyError:
                raise RevlogError(_('unknown compression type %r') % t)

        return compressor.decompress(data)

    def _isgooddeltainfo(self, deltainfo, revinfo):
        """Returns True if the given delta is good. Good means that it is within
        the disk span, disk size, and chain length bounds that we know to be
        performant."""
        if deltainfo is None:
            return False

        # - 'deltainfo.distance' is the distance from the base revision --
        #   bounding it limits the amount of I/O we need to do.
        # - 'deltainfo.compresseddeltalen' is the sum of the total size of
        #   deltas we need to apply -- bounding it limits the amount of CPU
        #   we consume.

        textlen = revinfo.textlen
        defaultmax = textlen * 4
        maxdist = self._maxdeltachainspan
        if not maxdist:
            maxdist = deltainfo.distance # ensure the conditional passes
        maxdist = max(maxdist, defaultmax)
        if (deltainfo.distance > maxdist or deltainfo.deltalen > textlen or
            deltainfo.compresseddeltalen > textlen * 2 or
            (self._maxchainlen and deltainfo.chainlen > self._maxchainlen)):
            return False

        return True

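    # Editor's worked example (assumed numbers): for a revision whose text is
    # 1000 bytes, with default settings a candidate delta survives only if
    #
    #   distance           <= max(_maxdeltachainspan, 4 * 1000)
    #   deltalen           <= 1000
    #   compresseddeltalen <= 2 * 1000
    #   chainlen           <= _maxchainlen (when one is configured)
    #
    # so a 1200-byte delta is always rejected: storing the snapshot outright
    # would already be cheaper to read back.
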
    def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
                     cachedelta, ifh, dfh, alwayscache=False,
                     deltacomputer=None):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.

        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

        if "deltacomputer" is not provided or None, a defaultdeltacomputer will
        be used.

        invariants:
        - rawtext is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        if node == nullid:
            raise RevlogError(_("%s: attempt to add null revision") %
                              (self.indexfile))
        if node == wdirid or node in wdirfilenodeids:
            raise RevlogError(_("%s: attempt to add wdir revision") %
                              (self.indexfile))

        if self._inline:
            fh = ifh
        else:
            fh = dfh

        btext = [rawtext]

        curr = len(self)
        prev = curr - 1
        offset = self.end(prev)
        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if rawtext is None:
            # need rawtext size, before changed by flag processors, which is
            # the non-raw size. use revlog explicitly to avoid filelog's extra
            # logic that might remove metadata size.
            textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
                                        cachedelta[1])
        else:
            textlen = len(rawtext)

        if deltacomputer is None:
            deltacomputer = _deltacomputer(self)

        revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)

        # no delta for flag processor revision (see "candelta" for why)
        # not calling candelta since only one revision needs test, also to
        # avoid overhead fetching flags again.
        if flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
            deltainfo = None
        else:
            deltainfo = deltacomputer.finddeltainfo(revinfo, fh)

        if deltainfo is not None:
            base = deltainfo.base
            chainbase = deltainfo.chainbase
            data = deltainfo.data
            l = deltainfo.deltalen
        else:
            rawtext = deltacomputer.buildtext(revinfo, fh)
            data = self.compress(rawtext)
            l = len(data[1]) + len(data[0])
            base = chainbase = curr

        e = (offset_type(offset, flags), l, textlen,
             base, link, p1r, p2r, node)
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        self._writeentry(transaction, ifh, dfh, entry, data, link, offset)

        if alwayscache and rawtext is None:
            rawtext = deltacomputer._buildtext(revinfo, fh)

        if type(rawtext) == bytes: # only accept immutable objects
            self._cache = (node, curr, rawtext)
        self._chainbasecache[curr] = chainbase
        return node

    def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3.
        ifh.seek(0, os.SEEK_END)
        if dfh:
            dfh.seek(0, os.SEEK_END)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self._enforceinlinesize(transaction, ifh)

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """

        nodes = []

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self._indexfp("a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self._datafp("a+")
        def flush():
            if dfh:
                dfh.flush()
            ifh.flush()
        try:
            deltacomputer = _deltacomputer(self)
            # loop through our set of deltas
            for data in deltas:
                node, p1, p2, linknode, deltabase, delta, flags = data
                link = linkmapper(linknode)
                flags = flags or REVIDX_DEFAULT_FLAGS

                nodes.append(node)

                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    continue

                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise LookupError(p, self.indexfile,
                                          _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise LookupError(deltabase, self.indexfile,
                                      _('unknown delta base'))

                baserev = self.rev(deltabase)

                if baserev != nullrev and self.iscensored(baserev):
                    # if base is censored, delta must be full replacement in a
                    # single patch operation
                    hlen = struct.calcsize(">lll")
                    oldlen = self.rawsize(baserev)
                    newlen = len(delta) - hlen
                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                        raise error.CensoredBaseError(self.indexfile,
                                                      self.node(baserev))

                if not flags and self._peek_iscensored(baserev, delta, flush):
                    flags |= REVIDX_ISCENSORED

                # We assume consumers of addrevisioncb will want to retrieve
                # the added revision, which will require a call to
                # revision(). revision() will fast path if there is a cache
                # hit. So, we tell _addrevision() to always cache in this case.
                # We're only using addgroup() in the context of changegroup
                # generation so the revision data can always be handled as raw
                # by the flagprocessor.
                self._addrevision(node, None, transaction, link,
                                  p1, p2, flags, (baserev, delta),
                                  ifh, dfh,
                                  alwayscache=bool(addrevisioncb),
                                  deltacomputer=deltacomputer)

                if addrevisioncb:
                    addrevisioncb(self, node)

                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self._datafp("a+")
                    ifh = self._indexfp("a+")
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return nodes

    def iscensored(self, rev):
        """Check if a file revision is censored."""
        if not self._censorable:
            return False

        return self.flags(rev) & REVIDX_ISCENSORED

    def _peek_iscensored(self, baserev, delta, flush):
        """Quickly check if a delta produces a censored revision."""
        if not self._censorable:
            return False

        # Fragile heuristic: unless new file meta keys are added alphabetically
        # preceding "censored", all censored revisions are prefixed by
        # "\1\ncensored:". A delta producing such a censored revision must be a
        # full-replacement delta, so we inspect the first and only patch in the
        # delta for this prefix.
        hlen = struct.calcsize(">lll")
        if len(delta) <= hlen:
            return False

        oldlen = self.rawsize(baserev)
        newlen = len(delta) - hlen
        if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
            return False

        add = "\1\ncensored:"
        addlen = len(add)
        return newlen >= addlen and delta[hlen:hlen + addlen] == add

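    # Editor's sketch (hypothetical names): the shape of the full-replacement
    # delta this heuristic expects. The ">lll" header encodes
    # (start, end, newlength) and must replace the entire base text:
    #
    #   hlen = struct.calcsize(">lll")                    # 12 bytes
    #   delta = struct.pack(">lll", 0, oldlen, newlen) + newtext
    #   censored = newtext.startswith("\1\ncensored:")
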
    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        brokenrevs = set()
        strippoint = len(self)

        heads = {}
        futurelargelinkrevs = set()
        for head in self.headrevs():
            headlinkrev = self.linkrev(head)
            heads[head] = headlinkrev
            if headlinkrev >= minlink:
                futurelargelinkrevs.add(headlinkrev)

        # This algorithm involves walking down the rev graph, starting at the
        # heads. Since the revs are topologically sorted according to linkrev,
        # once all head linkrevs are below the minlink, we know there are
        # no more revs that could have a linkrev greater than minlink.
        # So we can stop walking.
        while futurelargelinkrevs:
            strippoint -= 1
            linkrev = heads.pop(strippoint)

            if linkrev < minlink:
                brokenrevs.add(strippoint)
            else:
                futurelargelinkrevs.remove(linkrev)

            for p in self.parentrevs(strippoint):
                if p != nullrev:
                    plinkrev = self.linkrev(p)
                    heads[p] = plinkrev
                    if plinkrev >= minlink:
                        futurelargelinkrevs.add(plinkrev)

        return strippoint, brokenrevs

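    # Editor's worked example (assumed history): with one linkrev per
    # revision, linkrevs = [0, 1, 2, 3, 4] and minlink = 3, the walk pops
    # heads 4 then 3 (both >= minlink), stops once only linkrev 2 remains,
    # and returns (3, set()): truncate at revision 3, nothing below it has
    # a linkrev that the strip would break.
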
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chaininfocache = {}
        self._chunkclear()
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]
        self._nodepos = None

    def checksize(self):
        """check that the on-disk index and data file sizes are consistent

        Returns a ``(dd, di)`` pair of byte-count discrepancies for the data
        file and the index file; ``(0, 0)`` means the sizes match what the
        index records.
        """
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, 2)
                actual = f.tell()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            # use a context manager so the index file is closed even if
            # seek/tell raises
            with self.opener(self.indexfile) as f:
                f.seek(0, 2)
                actual = f.tell()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

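    # Sketch of consuming checksize() output (hypothetical caller, in the
    # spirit of what the verify code reports):
    #
    #     dd, di = rl.checksize()
    #     if dd:
    #         ui.warn('data length off by %d bytes\n' % dd)
    #     if di:
    #         ui.warn('index contains %d extra bytes\n' % di)
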
    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res

    DELTAREUSEALWAYS = 'always'
    DELTAREUSESAMEREVS = 'samerevs'
    DELTAREUSENEVER = 'never'

    DELTAREUSEFULLADD = 'fulladd'

    DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}

    def clone(self, tr, destrevlog, addrevisioncb=None,
              deltareuse=DELTAREUSESAMEREVS, aggressivemergedeltas=None):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
            Deltas will always be reused (if possible), even if the destination
            revlog would not select the same revisions for the delta. This is
            the fastest mode of operation.
        DELTAREUSESAMEREVS
            Deltas will be reused if the destination revlog would pick the same
            revisions for the delta. This mode strikes a balance between speed
            and optimization.
        DELTAREUSENEVER
            Deltas will never be reused. This is the slowest mode of execution.
            This mode can be used to recompute deltas (e.g. if the diff/delta
            algorithm changes).
        DELTAREUSEFULLADD
            Revisions will be re-added through the full ``addrevision()``
            pipeline instead of the lower-level ``_addrevision()`` write path
            used by the other modes.

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused when appropriate, but a delta will
        be recomputed when the destination revlog would choose a better base
        revision. This means that if you are converting a non-generaldelta
        revlog to a generaldelta revlog, deltas will be recomputed if the
        delta's parent isn't a parent of the revision.

        In addition to the delta policy, the ``aggressivemergedeltas`` argument
        controls whether to compute deltas against both parents for merges.
        When not set, the destination revlog's current setting is kept.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)

        if len(destrevlog):
            raise ValueError(_('destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_('source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_('destination revlog has filtered revisions'))

        # lazydeltabase controls whether to reuse a cached delta, if possible.
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._aggressivemergedeltas

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False

            destrevlog._aggressivemergedeltas = aggressivemergedeltas or oldamd

            populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
                                                self.DELTAREUSESAMEREVS)

            deltacomputer = _deltacomputer(destrevlog)
            index = self.index
            for rev in self:
                entry = index[rev]

                # Some classes override linkrev to take filtered revs into
                # account. Use raw entry from index.
                flags = entry[0] & 0xffff
                linkrev = entry[4]
                p1 = index[entry[5]][7]
                p2 = index[entry[6]][7]
                node = entry[7]

                # (Possibly) reuse the delta from the revlog if allowed and
                # the revlog chunk is a delta.
                cachedelta = None
                rawtext = None
                if populatecachedelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                if not cachedelta:
                    rawtext = self.revision(rev, raw=True)

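                # Two write paths below: DELTAREUSEFULLADD replays the
                # revision through the public addrevision() pipeline, while
                # the other policies call the lower-level _addrevision()
                # directly with explicit index/data file handles, handing it
                # any cached delta extracted above.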
                if deltareuse == self.DELTAREUSEFULLADD:
                    destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
                                           cachedelta=cachedelta,
                                           node=node, flags=flags,
                                           deltacomputer=deltacomputer)
                else:
                    ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
                                            checkambig=False)
                    dfh = None
                    if not destrevlog._inline:
                        dfh = destrevlog.opener(destrevlog.datafile, 'a+')
                    try:
                        destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
                                                p2, flags, cachedelta, ifh, dfh,
                                                deltacomputer=deltacomputer)
                    finally:
                        if dfh:
                            dfh.close()
                        ifh.close()

                if addrevisioncb:
                    addrevisioncb(self, rev, node)
        finally:
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._aggressivemergedeltas = oldamd
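
    # A minimal usage sketch for clone() (``repo``, ``srcrevlog`` and
    # ``destrevlog`` are illustrative and must be set up by the caller):
    #
    #     with repo.transaction('rewrite') as tr:
    #         srcrevlog.clone(tr, destrevlog,
    #                         deltareuse=srcrevlog.DELTAREUSENEVER)
    #
    # DELTAREUSENEVER forces every delta to be recomputed, which is the
    # policy to reach for after changing delta-related format options.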