##// END OF EJS Templates
revlog: add a `_get_decompressor` method...
marmoute -
r48028:eac3591a default
parent child Browse files
Show More
@@ -1,2696 +1,2697
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11 import re
11 import re
12
12
13 from . import (
13 from . import (
14 encoding,
14 encoding,
15 error,
15 error,
16 )
16 )
17
17
18
18
def loadconfigtable(ui, extname, configtable):
    """Merge an extension's config table into the ui's known config items.

    A devel warning is emitted for every item the extension overwrites.
    """
    for section, items in sorted(configtable.items()):
        registered = ui._knownconfig.setdefault(section, itemregister())
        # warn about every key the extension redefines
        overlap = set(registered) & set(items)
        for key in sorted(overlap):
            warning = b"extension '%s' overwrite config item '%s.%s'"
            warning %= (extname, section, key)
            ui.develwarn(warning, config=b'warn-config')

        registered.update(items)
31
31
32
32
class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, match name using regular expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        # keep a private copy so a shared tuple/list cannot be mutated later
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        # generic items are matched by regular expression at lookup time,
        # so precompile the pattern once here; None for plain items
        self._re = re.compile(name) if generic else None
64
64
class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        # generic items, matched by regular expression at lookup time
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        exact = super(itemregister, self).get(key)
        if exact is not None and not exact.generic:
            return exact

        # No exact (non-generic) entry: fall back to the generic
        # definitions, trying them in (priority, name) order so lookup is
        # deterministic.
        #
        # 'match' (anchored at the start of the string) is used rather
        # than 'search' to keep matching simple for people unfamiliar with
        # regular expressions: "color\..*" only matches keys that start
        # with "color.", while 'search' would suddenly also hit unrelated
        # keys that merely contain "color." somewhere. The tradeoff is
        # that some patterns need a trailing ".*", which we favor over
        # requiring a leading "^" — the "^" seems more error prone.
        for candidate in sorted(
            self._generics, key=lambda g: (g.priority, g.name)
        ):
            if candidate._re.match(key):
                return candidate

        return None
104
104
105
105
# mapping of section name -> itemregister holding core config items
coreitems = {}


def _register(configtable, *args, **kwargs):
    """Build a configitem from the arguments and record it in configtable.

    Registering the same section/name pair twice is a programming error.
    """
    item = configitem(*args, **kwargs)
    register = configtable.setdefault(item.section, itemregister())
    if item.name in register:
        msg = b"duplicated config item registration for '%s.%s'"
        msg %= (item.section, item.name)
        raise error.ProgrammingError(msg)
    register[item.name] = item
116
116
117
117
# special value for case where the default is derived from other values
dynamicdefault = object()

# Registering actual config items


def getitemregister(configtable):
    """Return a registration function bound to ``configtable``.

    The returned callable behaves like ``_register`` with its first
    argument fixed, and exposes ``dynamicdefault`` as an attribute
    (a pseudo enum spelled ``configitem.dynamicdefault`` by callers).
    """
    register = functools.partial(_register, configtable)
    register.dynamicdefault = dynamicdefault
    return register


coreconfigitem = getitemregister(coreitems)
132
132
133
133
def _registerdiffopts(section, configprefix=b''):
    """Register the shared set of diff options under ``section``.

    Every option name is prefixed with ``configprefix`` so the same knobs
    can be declared for several commands (diff, annotate,
    commit.interactive, revert.interactive, ...).
    """
    # (name suffix, default value) for each shared diff knob, registered
    # in the same order as the historical explicit declarations
    diffopts = [
        (b'nodates', False),
        (b'showfunc', False),
        (b'unified', None),
        (b'git', False),
        (b'ignorews', False),
        (b'ignorewsamount', False),
        (b'ignoreblanklines', False),
        (b'ignorewseol', False),
        (b'nobinary', False),
        (b'noprefix', False),
        (b'word-diff', False),
    ]
    for suffix, default in diffopts:
        coreconfigitem(
            section,
            configprefix + suffix,
            default=default,
        )
190
190
191
191
192 coreconfigitem(
192 coreconfigitem(
193 b'alias',
193 b'alias',
194 b'.*',
194 b'.*',
195 default=dynamicdefault,
195 default=dynamicdefault,
196 generic=True,
196 generic=True,
197 )
197 )
198 coreconfigitem(
198 coreconfigitem(
199 b'auth',
199 b'auth',
200 b'cookiefile',
200 b'cookiefile',
201 default=None,
201 default=None,
202 )
202 )
203 _registerdiffopts(section=b'annotate')
203 _registerdiffopts(section=b'annotate')
204 # bookmarks.pushing: internal hack for discovery
204 # bookmarks.pushing: internal hack for discovery
205 coreconfigitem(
205 coreconfigitem(
206 b'bookmarks',
206 b'bookmarks',
207 b'pushing',
207 b'pushing',
208 default=list,
208 default=list,
209 )
209 )
210 # bundle.mainreporoot: internal hack for bundlerepo
210 # bundle.mainreporoot: internal hack for bundlerepo
211 coreconfigitem(
211 coreconfigitem(
212 b'bundle',
212 b'bundle',
213 b'mainreporoot',
213 b'mainreporoot',
214 default=b'',
214 default=b'',
215 )
215 )
216 coreconfigitem(
216 coreconfigitem(
217 b'censor',
217 b'censor',
218 b'policy',
218 b'policy',
219 default=b'abort',
219 default=b'abort',
220 experimental=True,
220 experimental=True,
221 )
221 )
222 coreconfigitem(
222 coreconfigitem(
223 b'chgserver',
223 b'chgserver',
224 b'idletimeout',
224 b'idletimeout',
225 default=3600,
225 default=3600,
226 )
226 )
227 coreconfigitem(
227 coreconfigitem(
228 b'chgserver',
228 b'chgserver',
229 b'skiphash',
229 b'skiphash',
230 default=False,
230 default=False,
231 )
231 )
232 coreconfigitem(
232 coreconfigitem(
233 b'cmdserver',
233 b'cmdserver',
234 b'log',
234 b'log',
235 default=None,
235 default=None,
236 )
236 )
237 coreconfigitem(
237 coreconfigitem(
238 b'cmdserver',
238 b'cmdserver',
239 b'max-log-files',
239 b'max-log-files',
240 default=7,
240 default=7,
241 )
241 )
242 coreconfigitem(
242 coreconfigitem(
243 b'cmdserver',
243 b'cmdserver',
244 b'max-log-size',
244 b'max-log-size',
245 default=b'1 MB',
245 default=b'1 MB',
246 )
246 )
247 coreconfigitem(
247 coreconfigitem(
248 b'cmdserver',
248 b'cmdserver',
249 b'max-repo-cache',
249 b'max-repo-cache',
250 default=0,
250 default=0,
251 experimental=True,
251 experimental=True,
252 )
252 )
253 coreconfigitem(
253 coreconfigitem(
254 b'cmdserver',
254 b'cmdserver',
255 b'message-encodings',
255 b'message-encodings',
256 default=list,
256 default=list,
257 )
257 )
258 coreconfigitem(
258 coreconfigitem(
259 b'cmdserver',
259 b'cmdserver',
260 b'track-log',
260 b'track-log',
261 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
261 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
262 )
262 )
263 coreconfigitem(
263 coreconfigitem(
264 b'cmdserver',
264 b'cmdserver',
265 b'shutdown-on-interrupt',
265 b'shutdown-on-interrupt',
266 default=True,
266 default=True,
267 )
267 )
268 coreconfigitem(
268 coreconfigitem(
269 b'color',
269 b'color',
270 b'.*',
270 b'.*',
271 default=None,
271 default=None,
272 generic=True,
272 generic=True,
273 )
273 )
274 coreconfigitem(
274 coreconfigitem(
275 b'color',
275 b'color',
276 b'mode',
276 b'mode',
277 default=b'auto',
277 default=b'auto',
278 )
278 )
279 coreconfigitem(
279 coreconfigitem(
280 b'color',
280 b'color',
281 b'pagermode',
281 b'pagermode',
282 default=dynamicdefault,
282 default=dynamicdefault,
283 )
283 )
284 coreconfigitem(
284 coreconfigitem(
285 b'command-templates',
285 b'command-templates',
286 b'graphnode',
286 b'graphnode',
287 default=None,
287 default=None,
288 alias=[(b'ui', b'graphnodetemplate')],
288 alias=[(b'ui', b'graphnodetemplate')],
289 )
289 )
290 coreconfigitem(
290 coreconfigitem(
291 b'command-templates',
291 b'command-templates',
292 b'log',
292 b'log',
293 default=None,
293 default=None,
294 alias=[(b'ui', b'logtemplate')],
294 alias=[(b'ui', b'logtemplate')],
295 )
295 )
296 coreconfigitem(
296 coreconfigitem(
297 b'command-templates',
297 b'command-templates',
298 b'mergemarker',
298 b'mergemarker',
299 default=(
299 default=(
300 b'{node|short} '
300 b'{node|short} '
301 b'{ifeq(tags, "tip", "", '
301 b'{ifeq(tags, "tip", "", '
302 b'ifeq(tags, "", "", "{tags} "))}'
302 b'ifeq(tags, "", "", "{tags} "))}'
303 b'{if(bookmarks, "{bookmarks} ")}'
303 b'{if(bookmarks, "{bookmarks} ")}'
304 b'{ifeq(branch, "default", "", "{branch} ")}'
304 b'{ifeq(branch, "default", "", "{branch} ")}'
305 b'- {author|user}: {desc|firstline}'
305 b'- {author|user}: {desc|firstline}'
306 ),
306 ),
307 alias=[(b'ui', b'mergemarkertemplate')],
307 alias=[(b'ui', b'mergemarkertemplate')],
308 )
308 )
309 coreconfigitem(
309 coreconfigitem(
310 b'command-templates',
310 b'command-templates',
311 b'pre-merge-tool-output',
311 b'pre-merge-tool-output',
312 default=None,
312 default=None,
313 alias=[(b'ui', b'pre-merge-tool-output-template')],
313 alias=[(b'ui', b'pre-merge-tool-output-template')],
314 )
314 )
315 coreconfigitem(
315 coreconfigitem(
316 b'command-templates',
316 b'command-templates',
317 b'oneline-summary',
317 b'oneline-summary',
318 default=None,
318 default=None,
319 )
319 )
320 coreconfigitem(
320 coreconfigitem(
321 b'command-templates',
321 b'command-templates',
322 b'oneline-summary.*',
322 b'oneline-summary.*',
323 default=dynamicdefault,
323 default=dynamicdefault,
324 generic=True,
324 generic=True,
325 )
325 )
326 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
326 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
327 coreconfigitem(
327 coreconfigitem(
328 b'commands',
328 b'commands',
329 b'commit.post-status',
329 b'commit.post-status',
330 default=False,
330 default=False,
331 )
331 )
332 coreconfigitem(
332 coreconfigitem(
333 b'commands',
333 b'commands',
334 b'grep.all-files',
334 b'grep.all-files',
335 default=False,
335 default=False,
336 experimental=True,
336 experimental=True,
337 )
337 )
338 coreconfigitem(
338 coreconfigitem(
339 b'commands',
339 b'commands',
340 b'merge.require-rev',
340 b'merge.require-rev',
341 default=False,
341 default=False,
342 )
342 )
343 coreconfigitem(
343 coreconfigitem(
344 b'commands',
344 b'commands',
345 b'push.require-revs',
345 b'push.require-revs',
346 default=False,
346 default=False,
347 )
347 )
348 coreconfigitem(
348 coreconfigitem(
349 b'commands',
349 b'commands',
350 b'resolve.confirm',
350 b'resolve.confirm',
351 default=False,
351 default=False,
352 )
352 )
353 coreconfigitem(
353 coreconfigitem(
354 b'commands',
354 b'commands',
355 b'resolve.explicit-re-merge',
355 b'resolve.explicit-re-merge',
356 default=False,
356 default=False,
357 )
357 )
358 coreconfigitem(
358 coreconfigitem(
359 b'commands',
359 b'commands',
360 b'resolve.mark-check',
360 b'resolve.mark-check',
361 default=b'none',
361 default=b'none',
362 )
362 )
363 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
363 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
364 coreconfigitem(
364 coreconfigitem(
365 b'commands',
365 b'commands',
366 b'show.aliasprefix',
366 b'show.aliasprefix',
367 default=list,
367 default=list,
368 )
368 )
369 coreconfigitem(
369 coreconfigitem(
370 b'commands',
370 b'commands',
371 b'status.relative',
371 b'status.relative',
372 default=False,
372 default=False,
373 )
373 )
374 coreconfigitem(
374 coreconfigitem(
375 b'commands',
375 b'commands',
376 b'status.skipstates',
376 b'status.skipstates',
377 default=[],
377 default=[],
378 experimental=True,
378 experimental=True,
379 )
379 )
380 coreconfigitem(
380 coreconfigitem(
381 b'commands',
381 b'commands',
382 b'status.terse',
382 b'status.terse',
383 default=b'',
383 default=b'',
384 )
384 )
385 coreconfigitem(
385 coreconfigitem(
386 b'commands',
386 b'commands',
387 b'status.verbose',
387 b'status.verbose',
388 default=False,
388 default=False,
389 )
389 )
390 coreconfigitem(
390 coreconfigitem(
391 b'commands',
391 b'commands',
392 b'update.check',
392 b'update.check',
393 default=None,
393 default=None,
394 )
394 )
395 coreconfigitem(
395 coreconfigitem(
396 b'commands',
396 b'commands',
397 b'update.requiredest',
397 b'update.requiredest',
398 default=False,
398 default=False,
399 )
399 )
400 coreconfigitem(
400 coreconfigitem(
401 b'committemplate',
401 b'committemplate',
402 b'.*',
402 b'.*',
403 default=None,
403 default=None,
404 generic=True,
404 generic=True,
405 )
405 )
406 coreconfigitem(
406 coreconfigitem(
407 b'convert',
407 b'convert',
408 b'bzr.saverev',
408 b'bzr.saverev',
409 default=True,
409 default=True,
410 )
410 )
411 coreconfigitem(
411 coreconfigitem(
412 b'convert',
412 b'convert',
413 b'cvsps.cache',
413 b'cvsps.cache',
414 default=True,
414 default=True,
415 )
415 )
416 coreconfigitem(
416 coreconfigitem(
417 b'convert',
417 b'convert',
418 b'cvsps.fuzz',
418 b'cvsps.fuzz',
419 default=60,
419 default=60,
420 )
420 )
421 coreconfigitem(
421 coreconfigitem(
422 b'convert',
422 b'convert',
423 b'cvsps.logencoding',
423 b'cvsps.logencoding',
424 default=None,
424 default=None,
425 )
425 )
426 coreconfigitem(
426 coreconfigitem(
427 b'convert',
427 b'convert',
428 b'cvsps.mergefrom',
428 b'cvsps.mergefrom',
429 default=None,
429 default=None,
430 )
430 )
431 coreconfigitem(
431 coreconfigitem(
432 b'convert',
432 b'convert',
433 b'cvsps.mergeto',
433 b'cvsps.mergeto',
434 default=None,
434 default=None,
435 )
435 )
436 coreconfigitem(
436 coreconfigitem(
437 b'convert',
437 b'convert',
438 b'git.committeractions',
438 b'git.committeractions',
439 default=lambda: [b'messagedifferent'],
439 default=lambda: [b'messagedifferent'],
440 )
440 )
441 coreconfigitem(
441 coreconfigitem(
442 b'convert',
442 b'convert',
443 b'git.extrakeys',
443 b'git.extrakeys',
444 default=list,
444 default=list,
445 )
445 )
446 coreconfigitem(
446 coreconfigitem(
447 b'convert',
447 b'convert',
448 b'git.findcopiesharder',
448 b'git.findcopiesharder',
449 default=False,
449 default=False,
450 )
450 )
451 coreconfigitem(
451 coreconfigitem(
452 b'convert',
452 b'convert',
453 b'git.remoteprefix',
453 b'git.remoteprefix',
454 default=b'remote',
454 default=b'remote',
455 )
455 )
456 coreconfigitem(
456 coreconfigitem(
457 b'convert',
457 b'convert',
458 b'git.renamelimit',
458 b'git.renamelimit',
459 default=400,
459 default=400,
460 )
460 )
461 coreconfigitem(
461 coreconfigitem(
462 b'convert',
462 b'convert',
463 b'git.saverev',
463 b'git.saverev',
464 default=True,
464 default=True,
465 )
465 )
466 coreconfigitem(
466 coreconfigitem(
467 b'convert',
467 b'convert',
468 b'git.similarity',
468 b'git.similarity',
469 default=50,
469 default=50,
470 )
470 )
471 coreconfigitem(
471 coreconfigitem(
472 b'convert',
472 b'convert',
473 b'git.skipsubmodules',
473 b'git.skipsubmodules',
474 default=False,
474 default=False,
475 )
475 )
476 coreconfigitem(
476 coreconfigitem(
477 b'convert',
477 b'convert',
478 b'hg.clonebranches',
478 b'hg.clonebranches',
479 default=False,
479 default=False,
480 )
480 )
481 coreconfigitem(
481 coreconfigitem(
482 b'convert',
482 b'convert',
483 b'hg.ignoreerrors',
483 b'hg.ignoreerrors',
484 default=False,
484 default=False,
485 )
485 )
486 coreconfigitem(
486 coreconfigitem(
487 b'convert',
487 b'convert',
488 b'hg.preserve-hash',
488 b'hg.preserve-hash',
489 default=False,
489 default=False,
490 )
490 )
491 coreconfigitem(
491 coreconfigitem(
492 b'convert',
492 b'convert',
493 b'hg.revs',
493 b'hg.revs',
494 default=None,
494 default=None,
495 )
495 )
496 coreconfigitem(
496 coreconfigitem(
497 b'convert',
497 b'convert',
498 b'hg.saverev',
498 b'hg.saverev',
499 default=False,
499 default=False,
500 )
500 )
501 coreconfigitem(
501 coreconfigitem(
502 b'convert',
502 b'convert',
503 b'hg.sourcename',
503 b'hg.sourcename',
504 default=None,
504 default=None,
505 )
505 )
506 coreconfigitem(
506 coreconfigitem(
507 b'convert',
507 b'convert',
508 b'hg.startrev',
508 b'hg.startrev',
509 default=None,
509 default=None,
510 )
510 )
511 coreconfigitem(
511 coreconfigitem(
512 b'convert',
512 b'convert',
513 b'hg.tagsbranch',
513 b'hg.tagsbranch',
514 default=b'default',
514 default=b'default',
515 )
515 )
516 coreconfigitem(
516 coreconfigitem(
517 b'convert',
517 b'convert',
518 b'hg.usebranchnames',
518 b'hg.usebranchnames',
519 default=True,
519 default=True,
520 )
520 )
521 coreconfigitem(
521 coreconfigitem(
522 b'convert',
522 b'convert',
523 b'ignoreancestorcheck',
523 b'ignoreancestorcheck',
524 default=False,
524 default=False,
525 experimental=True,
525 experimental=True,
526 )
526 )
527 coreconfigitem(
527 coreconfigitem(
528 b'convert',
528 b'convert',
529 b'localtimezone',
529 b'localtimezone',
530 default=False,
530 default=False,
531 )
531 )
532 coreconfigitem(
532 coreconfigitem(
533 b'convert',
533 b'convert',
534 b'p4.encoding',
534 b'p4.encoding',
535 default=dynamicdefault,
535 default=dynamicdefault,
536 )
536 )
537 coreconfigitem(
537 coreconfigitem(
538 b'convert',
538 b'convert',
539 b'p4.startrev',
539 b'p4.startrev',
540 default=0,
540 default=0,
541 )
541 )
542 coreconfigitem(
542 coreconfigitem(
543 b'convert',
543 b'convert',
544 b'skiptags',
544 b'skiptags',
545 default=False,
545 default=False,
546 )
546 )
547 coreconfigitem(
547 coreconfigitem(
548 b'convert',
548 b'convert',
549 b'svn.debugsvnlog',
549 b'svn.debugsvnlog',
550 default=True,
550 default=True,
551 )
551 )
552 coreconfigitem(
552 coreconfigitem(
553 b'convert',
553 b'convert',
554 b'svn.trunk',
554 b'svn.trunk',
555 default=None,
555 default=None,
556 )
556 )
557 coreconfigitem(
557 coreconfigitem(
558 b'convert',
558 b'convert',
559 b'svn.tags',
559 b'svn.tags',
560 default=None,
560 default=None,
561 )
561 )
562 coreconfigitem(
562 coreconfigitem(
563 b'convert',
563 b'convert',
564 b'svn.branches',
564 b'svn.branches',
565 default=None,
565 default=None,
566 )
566 )
567 coreconfigitem(
567 coreconfigitem(
568 b'convert',
568 b'convert',
569 b'svn.startrev',
569 b'svn.startrev',
570 default=0,
570 default=0,
571 )
571 )
572 coreconfigitem(
572 coreconfigitem(
573 b'convert',
573 b'convert',
574 b'svn.dangerous-set-commit-dates',
574 b'svn.dangerous-set-commit-dates',
575 default=False,
575 default=False,
576 )
576 )
577 coreconfigitem(
577 coreconfigitem(
578 b'debug',
578 b'debug',
579 b'dirstate.delaywrite',
579 b'dirstate.delaywrite',
580 default=0,
580 default=0,
581 )
581 )
582 coreconfigitem(
582 coreconfigitem(
583 b'debug',
583 b'debug',
584 b'revlog.verifyposition.changelog',
584 b'revlog.verifyposition.changelog',
585 default=b'',
585 default=b'',
586 )
586 )
587 coreconfigitem(
587 coreconfigitem(
588 b'defaults',
588 b'defaults',
589 b'.*',
589 b'.*',
590 default=None,
590 default=None,
591 generic=True,
591 generic=True,
592 )
592 )
593 coreconfigitem(
593 coreconfigitem(
594 b'devel',
594 b'devel',
595 b'all-warnings',
595 b'all-warnings',
596 default=False,
596 default=False,
597 )
597 )
598 coreconfigitem(
598 coreconfigitem(
599 b'devel',
599 b'devel',
600 b'bundle2.debug',
600 b'bundle2.debug',
601 default=False,
601 default=False,
602 )
602 )
603 coreconfigitem(
603 coreconfigitem(
604 b'devel',
604 b'devel',
605 b'bundle.delta',
605 b'bundle.delta',
606 default=b'',
606 default=b'',
607 )
607 )
608 coreconfigitem(
608 coreconfigitem(
609 b'devel',
609 b'devel',
610 b'cache-vfs',
610 b'cache-vfs',
611 default=None,
611 default=None,
612 )
612 )
613 coreconfigitem(
613 coreconfigitem(
614 b'devel',
614 b'devel',
615 b'check-locks',
615 b'check-locks',
616 default=False,
616 default=False,
617 )
617 )
618 coreconfigitem(
618 coreconfigitem(
619 b'devel',
619 b'devel',
620 b'check-relroot',
620 b'check-relroot',
621 default=False,
621 default=False,
622 )
622 )
623 # Track copy information for all file, not just "added" one (very slow)
623 # Track copy information for all file, not just "added" one (very slow)
624 coreconfigitem(
624 coreconfigitem(
625 b'devel',
625 b'devel',
626 b'copy-tracing.trace-all-files',
626 b'copy-tracing.trace-all-files',
627 default=False,
627 default=False,
628 )
628 )
629 coreconfigitem(
629 coreconfigitem(
630 b'devel',
630 b'devel',
631 b'default-date',
631 b'default-date',
632 default=None,
632 default=None,
633 )
633 )
634 coreconfigitem(
634 coreconfigitem(
635 b'devel',
635 b'devel',
636 b'deprec-warn',
636 b'deprec-warn',
637 default=False,
637 default=False,
638 )
638 )
639 coreconfigitem(
639 coreconfigitem(
640 b'devel',
640 b'devel',
641 b'disableloaddefaultcerts',
641 b'disableloaddefaultcerts',
642 default=False,
642 default=False,
643 )
643 )
644 coreconfigitem(
644 coreconfigitem(
645 b'devel',
645 b'devel',
646 b'warn-empty-changegroup',
646 b'warn-empty-changegroup',
647 default=False,
647 default=False,
648 )
648 )
649 coreconfigitem(
649 coreconfigitem(
650 b'devel',
650 b'devel',
651 b'legacy.exchange',
651 b'legacy.exchange',
652 default=list,
652 default=list,
653 )
653 )
654 # When True, revlogs use a special reference version of the nodemap, that is not
654 # When True, revlogs use a special reference version of the nodemap, that is not
655 # performant but is "known" to behave properly.
655 # performant but is "known" to behave properly.
656 coreconfigitem(
656 coreconfigitem(
657 b'devel',
657 b'devel',
658 b'persistent-nodemap',
658 b'persistent-nodemap',
659 default=False,
659 default=False,
660 )
660 )
661 coreconfigitem(
661 coreconfigitem(
662 b'devel',
662 b'devel',
663 b'servercafile',
663 b'servercafile',
664 default=b'',
664 default=b'',
665 )
665 )
666 coreconfigitem(
666 coreconfigitem(
667 b'devel',
667 b'devel',
668 b'serverexactprotocol',
668 b'serverexactprotocol',
669 default=b'',
669 default=b'',
670 )
670 )
671 coreconfigitem(
671 coreconfigitem(
672 b'devel',
672 b'devel',
673 b'serverrequirecert',
673 b'serverrequirecert',
674 default=False,
674 default=False,
675 )
675 )
676 coreconfigitem(
676 coreconfigitem(
677 b'devel',
677 b'devel',
678 b'strip-obsmarkers',
678 b'strip-obsmarkers',
679 default=True,
679 default=True,
680 )
680 )
681 coreconfigitem(
681 coreconfigitem(
682 b'devel',
682 b'devel',
683 b'warn-config',
683 b'warn-config',
684 default=None,
684 default=None,
685 )
685 )
686 coreconfigitem(
686 coreconfigitem(
687 b'devel',
687 b'devel',
688 b'warn-config-default',
688 b'warn-config-default',
689 default=None,
689 default=None,
690 )
690 )
691 coreconfigitem(
691 coreconfigitem(
692 b'devel',
692 b'devel',
693 b'user.obsmarker',
693 b'user.obsmarker',
694 default=None,
694 default=None,
695 )
695 )
696 coreconfigitem(
696 coreconfigitem(
697 b'devel',
697 b'devel',
698 b'warn-config-unknown',
698 b'warn-config-unknown',
699 default=None,
699 default=None,
700 )
700 )
701 coreconfigitem(
701 coreconfigitem(
702 b'devel',
702 b'devel',
703 b'debug.copies',
703 b'debug.copies',
704 default=False,
704 default=False,
705 )
705 )
706 coreconfigitem(
706 coreconfigitem(
707 b'devel',
707 b'devel',
708 b'copy-tracing.multi-thread',
708 b'copy-tracing.multi-thread',
709 default=True,
709 default=True,
710 )
710 )
711 coreconfigitem(
711 coreconfigitem(
712 b'devel',
712 b'devel',
713 b'debug.extensions',
713 b'debug.extensions',
714 default=False,
714 default=False,
715 )
715 )
716 coreconfigitem(
716 coreconfigitem(
717 b'devel',
717 b'devel',
718 b'debug.repo-filters',
718 b'debug.repo-filters',
719 default=False,
719 default=False,
720 )
720 )
721 coreconfigitem(
721 coreconfigitem(
722 b'devel',
722 b'devel',
723 b'debug.peer-request',
723 b'debug.peer-request',
724 default=False,
724 default=False,
725 )
725 )
726 # If discovery.exchange-heads is False, the discovery will not start with
726 # If discovery.exchange-heads is False, the discovery will not start with
727 # remote head fetching and local head querying.
727 # remote head fetching and local head querying.
728 coreconfigitem(
728 coreconfigitem(
729 b'devel',
729 b'devel',
730 b'discovery.exchange-heads',
730 b'discovery.exchange-heads',
731 default=True,
731 default=True,
732 )
732 )
733 # If discovery.grow-sample is False, the sample size used in set discovery will
733 # If discovery.grow-sample is False, the sample size used in set discovery will
734 # not be increased through the process
734 # not be increased through the process
735 coreconfigitem(
735 coreconfigitem(
736 b'devel',
736 b'devel',
737 b'discovery.grow-sample',
737 b'discovery.grow-sample',
738 default=True,
738 default=True,
739 )
739 )
740 # When discovery.grow-sample.dynamic is True, the default, the sample size is
740 # When discovery.grow-sample.dynamic is True, the default, the sample size is
741 # adapted to the shape of the undecided set (it is set to the max of:
741 # adapted to the shape of the undecided set (it is set to the max of:
742 # <target-size>, len(roots(undecided)), len(heads(undecided)
742 # <target-size>, len(roots(undecided)), len(heads(undecided)
743 coreconfigitem(
743 coreconfigitem(
744 b'devel',
744 b'devel',
745 b'discovery.grow-sample.dynamic',
745 b'discovery.grow-sample.dynamic',
746 default=True,
746 default=True,
747 )
747 )
748 # discovery.grow-sample.rate control the rate at which the sample grow
748 # discovery.grow-sample.rate control the rate at which the sample grow
749 coreconfigitem(
749 coreconfigitem(
750 b'devel',
750 b'devel',
751 b'discovery.grow-sample.rate',
751 b'discovery.grow-sample.rate',
752 default=1.05,
752 default=1.05,
753 )
753 )
754 # If discovery.randomize is False, random sampling during discovery are
754 # If discovery.randomize is False, random sampling during discovery are
755 # deterministic. It is meant for integration tests.
755 # deterministic. It is meant for integration tests.
756 coreconfigitem(
756 coreconfigitem(
757 b'devel',
757 b'devel',
758 b'discovery.randomize',
758 b'discovery.randomize',
759 default=True,
759 default=True,
760 )
760 )
761 # Control the initial size of the discovery sample
761 # Control the initial size of the discovery sample
762 coreconfigitem(
762 coreconfigitem(
763 b'devel',
763 b'devel',
764 b'discovery.sample-size',
764 b'discovery.sample-size',
765 default=200,
765 default=200,
766 )
766 )
767 # Control the initial size of the discovery for initial change
767 # Control the initial size of the discovery for initial change
768 coreconfigitem(
768 coreconfigitem(
769 b'devel',
769 b'devel',
770 b'discovery.sample-size.initial',
770 b'discovery.sample-size.initial',
771 default=100,
771 default=100,
772 )
772 )
773 _registerdiffopts(section=b'diff')
773 _registerdiffopts(section=b'diff')
774 coreconfigitem(
774 coreconfigitem(
775 b'diff',
775 b'diff',
776 b'merge',
776 b'merge',
777 default=False,
777 default=False,
778 experimental=True,
778 experimental=True,
779 )
779 )
780 coreconfigitem(
780 coreconfigitem(
781 b'email',
781 b'email',
782 b'bcc',
782 b'bcc',
783 default=None,
783 default=None,
784 )
784 )
785 coreconfigitem(
785 coreconfigitem(
786 b'email',
786 b'email',
787 b'cc',
787 b'cc',
788 default=None,
788 default=None,
789 )
789 )
790 coreconfigitem(
790 coreconfigitem(
791 b'email',
791 b'email',
792 b'charsets',
792 b'charsets',
793 default=list,
793 default=list,
794 )
794 )
795 coreconfigitem(
795 coreconfigitem(
796 b'email',
796 b'email',
797 b'from',
797 b'from',
798 default=None,
798 default=None,
799 )
799 )
800 coreconfigitem(
800 coreconfigitem(
801 b'email',
801 b'email',
802 b'method',
802 b'method',
803 default=b'smtp',
803 default=b'smtp',
804 )
804 )
805 coreconfigitem(
805 coreconfigitem(
806 b'email',
806 b'email',
807 b'reply-to',
807 b'reply-to',
808 default=None,
808 default=None,
809 )
809 )
810 coreconfigitem(
810 coreconfigitem(
811 b'email',
811 b'email',
812 b'to',
812 b'to',
813 default=None,
813 default=None,
814 )
814 )
815 coreconfigitem(
815 coreconfigitem(
816 b'experimental',
816 b'experimental',
817 b'archivemetatemplate',
817 b'archivemetatemplate',
818 default=dynamicdefault,
818 default=dynamicdefault,
819 )
819 )
820 coreconfigitem(
820 coreconfigitem(
821 b'experimental',
821 b'experimental',
822 b'auto-publish',
822 b'auto-publish',
823 default=b'publish',
823 default=b'publish',
824 )
824 )
825 coreconfigitem(
825 coreconfigitem(
826 b'experimental',
826 b'experimental',
827 b'bundle-phases',
827 b'bundle-phases',
828 default=False,
828 default=False,
829 )
829 )
830 coreconfigitem(
830 coreconfigitem(
831 b'experimental',
831 b'experimental',
832 b'bundle2-advertise',
832 b'bundle2-advertise',
833 default=True,
833 default=True,
834 )
834 )
835 coreconfigitem(
835 coreconfigitem(
836 b'experimental',
836 b'experimental',
837 b'bundle2-output-capture',
837 b'bundle2-output-capture',
838 default=False,
838 default=False,
839 )
839 )
840 coreconfigitem(
840 coreconfigitem(
841 b'experimental',
841 b'experimental',
842 b'bundle2.pushback',
842 b'bundle2.pushback',
843 default=False,
843 default=False,
844 )
844 )
845 coreconfigitem(
845 coreconfigitem(
846 b'experimental',
846 b'experimental',
847 b'bundle2lazylocking',
847 b'bundle2lazylocking',
848 default=False,
848 default=False,
849 )
849 )
850 coreconfigitem(
850 coreconfigitem(
851 b'experimental',
851 b'experimental',
852 b'bundlecomplevel',
852 b'bundlecomplevel',
853 default=None,
853 default=None,
854 )
854 )
855 coreconfigitem(
855 coreconfigitem(
856 b'experimental',
856 b'experimental',
857 b'bundlecomplevel.bzip2',
857 b'bundlecomplevel.bzip2',
858 default=None,
858 default=None,
859 )
859 )
860 coreconfigitem(
860 coreconfigitem(
861 b'experimental',
861 b'experimental',
862 b'bundlecomplevel.gzip',
862 b'bundlecomplevel.gzip',
863 default=None,
863 default=None,
864 )
864 )
865 coreconfigitem(
865 coreconfigitem(
866 b'experimental',
866 b'experimental',
867 b'bundlecomplevel.none',
867 b'bundlecomplevel.none',
868 default=None,
868 default=None,
869 )
869 )
870 coreconfigitem(
870 coreconfigitem(
871 b'experimental',
871 b'experimental',
872 b'bundlecomplevel.zstd',
872 b'bundlecomplevel.zstd',
873 default=None,
873 default=None,
874 )
874 )
875 coreconfigitem(
875 coreconfigitem(
876 b'experimental',
876 b'experimental',
877 b'bundlecompthreads',
877 b'bundlecompthreads',
878 default=None,
878 default=None,
879 )
879 )
880 coreconfigitem(
880 coreconfigitem(
881 b'experimental',
881 b'experimental',
882 b'bundlecompthreads.bzip2',
882 b'bundlecompthreads.bzip2',
883 default=None,
883 default=None,
884 )
884 )
885 coreconfigitem(
885 coreconfigitem(
886 b'experimental',
886 b'experimental',
887 b'bundlecompthreads.gzip',
887 b'bundlecompthreads.gzip',
888 default=None,
888 default=None,
889 )
889 )
890 coreconfigitem(
890 coreconfigitem(
891 b'experimental',
891 b'experimental',
892 b'bundlecompthreads.none',
892 b'bundlecompthreads.none',
893 default=None,
893 default=None,
894 )
894 )
895 coreconfigitem(
895 coreconfigitem(
896 b'experimental',
896 b'experimental',
897 b'bundlecompthreads.zstd',
897 b'bundlecompthreads.zstd',
898 default=None,
898 default=None,
899 )
899 )
900 coreconfigitem(
900 coreconfigitem(
901 b'experimental',
901 b'experimental',
902 b'changegroup3',
902 b'changegroup3',
903 default=False,
903 default=False,
904 )
904 )
905 coreconfigitem(
905 coreconfigitem(
906 b'experimental',
906 b'experimental',
907 b'changegroup4',
907 b'changegroup4',
908 default=False,
908 default=False,
909 )
909 )
910 coreconfigitem(
910 coreconfigitem(
911 b'experimental',
911 b'experimental',
912 b'cleanup-as-archived',
912 b'cleanup-as-archived',
913 default=False,
913 default=False,
914 )
914 )
915 coreconfigitem(
915 coreconfigitem(
916 b'experimental',
916 b'experimental',
917 b'clientcompressionengines',
917 b'clientcompressionengines',
918 default=list,
918 default=list,
919 )
919 )
920 coreconfigitem(
920 coreconfigitem(
921 b'experimental',
921 b'experimental',
922 b'copytrace',
922 b'copytrace',
923 default=b'on',
923 default=b'on',
924 )
924 )
925 coreconfigitem(
925 coreconfigitem(
926 b'experimental',
926 b'experimental',
927 b'copytrace.movecandidateslimit',
927 b'copytrace.movecandidateslimit',
928 default=100,
928 default=100,
929 )
929 )
930 coreconfigitem(
930 coreconfigitem(
931 b'experimental',
931 b'experimental',
932 b'copytrace.sourcecommitlimit',
932 b'copytrace.sourcecommitlimit',
933 default=100,
933 default=100,
934 )
934 )
935 coreconfigitem(
935 coreconfigitem(
936 b'experimental',
936 b'experimental',
937 b'copies.read-from',
937 b'copies.read-from',
938 default=b"filelog-only",
938 default=b"filelog-only",
939 )
939 )
940 coreconfigitem(
940 coreconfigitem(
941 b'experimental',
941 b'experimental',
942 b'copies.write-to',
942 b'copies.write-to',
943 default=b'filelog-only',
943 default=b'filelog-only',
944 )
944 )
945 coreconfigitem(
945 coreconfigitem(
946 b'experimental',
946 b'experimental',
947 b'crecordtest',
947 b'crecordtest',
948 default=None,
948 default=None,
949 )
949 )
950 coreconfigitem(
950 coreconfigitem(
951 b'experimental',
951 b'experimental',
952 b'directaccess',
952 b'directaccess',
953 default=False,
953 default=False,
954 )
954 )
955 coreconfigitem(
955 coreconfigitem(
956 b'experimental',
956 b'experimental',
957 b'directaccess.revnums',
957 b'directaccess.revnums',
958 default=False,
958 default=False,
959 )
959 )
960 coreconfigitem(
960 coreconfigitem(
961 b'experimental',
961 b'experimental',
962 b'dirstate-tree.in-memory',
962 b'dirstate-tree.in-memory',
963 default=False,
963 default=False,
964 )
964 )
965 coreconfigitem(
965 coreconfigitem(
966 b'experimental',
966 b'experimental',
967 b'editortmpinhg',
967 b'editortmpinhg',
968 default=False,
968 default=False,
969 )
969 )
970 coreconfigitem(
970 coreconfigitem(
971 b'experimental',
971 b'experimental',
972 b'evolution',
972 b'evolution',
973 default=list,
973 default=list,
974 )
974 )
975 coreconfigitem(
975 coreconfigitem(
976 b'experimental',
976 b'experimental',
977 b'evolution.allowdivergence',
977 b'evolution.allowdivergence',
978 default=False,
978 default=False,
979 alias=[(b'experimental', b'allowdivergence')],
979 alias=[(b'experimental', b'allowdivergence')],
980 )
980 )
981 coreconfigitem(
981 coreconfigitem(
982 b'experimental',
982 b'experimental',
983 b'evolution.allowunstable',
983 b'evolution.allowunstable',
984 default=None,
984 default=None,
985 )
985 )
986 coreconfigitem(
986 coreconfigitem(
987 b'experimental',
987 b'experimental',
988 b'evolution.createmarkers',
988 b'evolution.createmarkers',
989 default=None,
989 default=None,
990 )
990 )
991 coreconfigitem(
991 coreconfigitem(
992 b'experimental',
992 b'experimental',
993 b'evolution.effect-flags',
993 b'evolution.effect-flags',
994 default=True,
994 default=True,
995 alias=[(b'experimental', b'effect-flags')],
995 alias=[(b'experimental', b'effect-flags')],
996 )
996 )
997 coreconfigitem(
997 coreconfigitem(
998 b'experimental',
998 b'experimental',
999 b'evolution.exchange',
999 b'evolution.exchange',
1000 default=None,
1000 default=None,
1001 )
1001 )
1002 coreconfigitem(
1002 coreconfigitem(
1003 b'experimental',
1003 b'experimental',
1004 b'evolution.bundle-obsmarker',
1004 b'evolution.bundle-obsmarker',
1005 default=False,
1005 default=False,
1006 )
1006 )
1007 coreconfigitem(
1007 coreconfigitem(
1008 b'experimental',
1008 b'experimental',
1009 b'evolution.bundle-obsmarker:mandatory',
1009 b'evolution.bundle-obsmarker:mandatory',
1010 default=True,
1010 default=True,
1011 )
1011 )
1012 coreconfigitem(
1012 coreconfigitem(
1013 b'experimental',
1013 b'experimental',
1014 b'log.topo',
1014 b'log.topo',
1015 default=False,
1015 default=False,
1016 )
1016 )
1017 coreconfigitem(
1017 coreconfigitem(
1018 b'experimental',
1018 b'experimental',
1019 b'evolution.report-instabilities',
1019 b'evolution.report-instabilities',
1020 default=True,
1020 default=True,
1021 )
1021 )
1022 coreconfigitem(
1022 coreconfigitem(
1023 b'experimental',
1023 b'experimental',
1024 b'evolution.track-operation',
1024 b'evolution.track-operation',
1025 default=True,
1025 default=True,
1026 )
1026 )
1027 # repo-level config to exclude a revset visibility
1027 # repo-level config to exclude a revset visibility
1028 #
1028 #
1029 # The target use case is to use `share` to expose different subset of the same
1029 # The target use case is to use `share` to expose different subset of the same
1030 # repository, especially server side. See also `server.view`.
1030 # repository, especially server side. See also `server.view`.
1031 coreconfigitem(
1031 coreconfigitem(
1032 b'experimental',
1032 b'experimental',
1033 b'extra-filter-revs',
1033 b'extra-filter-revs',
1034 default=None,
1034 default=None,
1035 )
1035 )
1036 coreconfigitem(
1036 coreconfigitem(
1037 b'experimental',
1037 b'experimental',
1038 b'maxdeltachainspan',
1038 b'maxdeltachainspan',
1039 default=-1,
1039 default=-1,
1040 )
1040 )
1041 # tracks files which were undeleted (merge might delete them but we explicitly
1041 # tracks files which were undeleted (merge might delete them but we explicitly
1042 # kept/undeleted them) and creates new filenodes for them
1042 # kept/undeleted them) and creates new filenodes for them
1043 coreconfigitem(
1043 coreconfigitem(
1044 b'experimental',
1044 b'experimental',
1045 b'merge-track-salvaged',
1045 b'merge-track-salvaged',
1046 default=False,
1046 default=False,
1047 )
1047 )
1048 coreconfigitem(
1048 coreconfigitem(
1049 b'experimental',
1049 b'experimental',
1050 b'mergetempdirprefix',
1050 b'mergetempdirprefix',
1051 default=None,
1051 default=None,
1052 )
1052 )
1053 coreconfigitem(
1053 coreconfigitem(
1054 b'experimental',
1054 b'experimental',
1055 b'mmapindexthreshold',
1055 b'mmapindexthreshold',
1056 default=None,
1056 default=None,
1057 )
1057 )
1058 coreconfigitem(
1058 coreconfigitem(
1059 b'experimental',
1059 b'experimental',
1060 b'narrow',
1060 b'narrow',
1061 default=False,
1061 default=False,
1062 )
1062 )
1063 coreconfigitem(
1063 coreconfigitem(
1064 b'experimental',
1064 b'experimental',
1065 b'nonnormalparanoidcheck',
1065 b'nonnormalparanoidcheck',
1066 default=False,
1066 default=False,
1067 )
1067 )
1068 coreconfigitem(
1068 coreconfigitem(
1069 b'experimental',
1069 b'experimental',
1070 b'exportableenviron',
1070 b'exportableenviron',
1071 default=list,
1071 default=list,
1072 )
1072 )
1073 coreconfigitem(
1073 coreconfigitem(
1074 b'experimental',
1074 b'experimental',
1075 b'extendedheader.index',
1075 b'extendedheader.index',
1076 default=None,
1076 default=None,
1077 )
1077 )
1078 coreconfigitem(
1078 coreconfigitem(
1079 b'experimental',
1079 b'experimental',
1080 b'extendedheader.similarity',
1080 b'extendedheader.similarity',
1081 default=False,
1081 default=False,
1082 )
1082 )
1083 coreconfigitem(
1083 coreconfigitem(
1084 b'experimental',
1084 b'experimental',
1085 b'graphshorten',
1085 b'graphshorten',
1086 default=False,
1086 default=False,
1087 )
1087 )
1088 coreconfigitem(
1088 coreconfigitem(
1089 b'experimental',
1089 b'experimental',
1090 b'graphstyle.parent',
1090 b'graphstyle.parent',
1091 default=dynamicdefault,
1091 default=dynamicdefault,
1092 )
1092 )
1093 coreconfigitem(
1093 coreconfigitem(
1094 b'experimental',
1094 b'experimental',
1095 b'graphstyle.missing',
1095 b'graphstyle.missing',
1096 default=dynamicdefault,
1096 default=dynamicdefault,
1097 )
1097 )
1098 coreconfigitem(
1098 coreconfigitem(
1099 b'experimental',
1099 b'experimental',
1100 b'graphstyle.grandparent',
1100 b'graphstyle.grandparent',
1101 default=dynamicdefault,
1101 default=dynamicdefault,
1102 )
1102 )
1103 coreconfigitem(
1103 coreconfigitem(
1104 b'experimental',
1104 b'experimental',
1105 b'hook-track-tags',
1105 b'hook-track-tags',
1106 default=False,
1106 default=False,
1107 )
1107 )
1108 coreconfigitem(
1108 coreconfigitem(
1109 b'experimental',
1109 b'experimental',
1110 b'httppeer.advertise-v2',
1110 b'httppeer.advertise-v2',
1111 default=False,
1111 default=False,
1112 )
1112 )
1113 coreconfigitem(
1113 coreconfigitem(
1114 b'experimental',
1114 b'experimental',
1115 b'httppeer.v2-encoder-order',
1115 b'httppeer.v2-encoder-order',
1116 default=None,
1116 default=None,
1117 )
1117 )
1118 coreconfigitem(
1118 coreconfigitem(
1119 b'experimental',
1119 b'experimental',
1120 b'httppostargs',
1120 b'httppostargs',
1121 default=False,
1121 default=False,
1122 )
1122 )
1123 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1123 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1124 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1124 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1125
1125
1126 coreconfigitem(
1126 coreconfigitem(
1127 b'experimental',
1127 b'experimental',
1128 b'obsmarkers-exchange-debug',
1128 b'obsmarkers-exchange-debug',
1129 default=False,
1129 default=False,
1130 )
1130 )
1131 coreconfigitem(
1131 coreconfigitem(
1132 b'experimental',
1132 b'experimental',
1133 b'remotenames',
1133 b'remotenames',
1134 default=False,
1134 default=False,
1135 )
1135 )
1136 coreconfigitem(
1136 coreconfigitem(
1137 b'experimental',
1137 b'experimental',
1138 b'removeemptydirs',
1138 b'removeemptydirs',
1139 default=True,
1139 default=True,
1140 )
1140 )
1141 coreconfigitem(
1141 coreconfigitem(
1142 b'experimental',
1142 b'experimental',
1143 b'revert.interactive.select-to-keep',
1143 b'revert.interactive.select-to-keep',
1144 default=False,
1144 default=False,
1145 )
1145 )
1146 coreconfigitem(
1146 coreconfigitem(
1147 b'experimental',
1147 b'experimental',
1148 b'revisions.prefixhexnode',
1148 b'revisions.prefixhexnode',
1149 default=False,
1149 default=False,
1150 )
1150 )
1151 # "out of experimental" todo list.
1151 # "out of experimental" todo list.
1152 #
1152 #
1153 # * include management of a persistent nodemap in the main docket
1153 # * include management of a persistent nodemap in the main docket
1154 # * enforce a "no-truncate" policy for mmap safety
1154 # * enforce a "no-truncate" policy for mmap safety
1155 # - for censoring operation
1155 # - for censoring operation
1156 # - for stripping operation
1156 # - for stripping operation
1157 # - for rollback operation
1157 # - for rollback operation
1158 # * proper streaming (race free) of the docket file
1158 # * proper streaming (race free) of the docket file
1159 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1159 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1160 # * Exchange-wise, we will also need to do something more efficient than
1160 # * Exchange-wise, we will also need to do something more efficient than
1161 # keeping references to the affected revlogs, especially memory-wise when
1161 # keeping references to the affected revlogs, especially memory-wise when
1162 # rewriting sidedata.
1162 # rewriting sidedata.
1163 # * sidedata compression
1163 # * sidedata compression
1164 # * introduce a proper solution to reduce the number of filelog related files.
1164 # * introduce a proper solution to reduce the number of filelog related files.
1165 # * Improvement to consider
1165 # * Improvement to consider
1166 # - track compression mode in the index entris instead of the chunks
1166 # - avoid compression header in chunk using the default compression?
1167 # - forbid "inline" compression mode entirely?
1167 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1168 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1168 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1169 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1169 # - keep track of chain base or size (probably not that useful anymore)
1170 # - keep track of chain base or size (probably not that useful anymore)
1170 # - store data and sidedata in different files
1171 # - store data and sidedata in different files
1171 coreconfigitem(
1172 coreconfigitem(
1172 b'experimental',
1173 b'experimental',
1173 b'revlogv2',
1174 b'revlogv2',
1174 default=None,
1175 default=None,
1175 )
1176 )
1176 coreconfigitem(
1177 coreconfigitem(
1177 b'experimental',
1178 b'experimental',
1178 b'revisions.disambiguatewithin',
1179 b'revisions.disambiguatewithin',
1179 default=None,
1180 default=None,
1180 )
1181 )
1181 coreconfigitem(
1182 coreconfigitem(
1182 b'experimental',
1183 b'experimental',
1183 b'rust.index',
1184 b'rust.index',
1184 default=False,
1185 default=False,
1185 )
1186 )
1186 coreconfigitem(
1187 coreconfigitem(
1187 b'experimental',
1188 b'experimental',
1188 b'server.filesdata.recommended-batch-size',
1189 b'server.filesdata.recommended-batch-size',
1189 default=50000,
1190 default=50000,
1190 )
1191 )
1191 coreconfigitem(
1192 coreconfigitem(
1192 b'experimental',
1193 b'experimental',
1193 b'server.manifestdata.recommended-batch-size',
1194 b'server.manifestdata.recommended-batch-size',
1194 default=100000,
1195 default=100000,
1195 )
1196 )
1196 coreconfigitem(
1197 coreconfigitem(
1197 b'experimental',
1198 b'experimental',
1198 b'server.stream-narrow-clones',
1199 b'server.stream-narrow-clones',
1199 default=False,
1200 default=False,
1200 )
1201 )
1201 coreconfigitem(
1202 coreconfigitem(
1202 b'experimental',
1203 b'experimental',
1203 b'single-head-per-branch',
1204 b'single-head-per-branch',
1204 default=False,
1205 default=False,
1205 )
1206 )
1206 coreconfigitem(
1207 coreconfigitem(
1207 b'experimental',
1208 b'experimental',
1208 b'single-head-per-branch:account-closed-heads',
1209 b'single-head-per-branch:account-closed-heads',
1209 default=False,
1210 default=False,
1210 )
1211 )
1211 coreconfigitem(
1212 coreconfigitem(
1212 b'experimental',
1213 b'experimental',
1213 b'single-head-per-branch:public-changes-only',
1214 b'single-head-per-branch:public-changes-only',
1214 default=False,
1215 default=False,
1215 )
1216 )
1216 coreconfigitem(
1217 coreconfigitem(
1217 b'experimental',
1218 b'experimental',
1218 b'sshserver.support-v2',
1219 b'sshserver.support-v2',
1219 default=False,
1220 default=False,
1220 )
1221 )
1221 coreconfigitem(
1222 coreconfigitem(
1222 b'experimental',
1223 b'experimental',
1223 b'sparse-read',
1224 b'sparse-read',
1224 default=False,
1225 default=False,
1225 )
1226 )
1226 coreconfigitem(
1227 coreconfigitem(
1227 b'experimental',
1228 b'experimental',
1228 b'sparse-read.density-threshold',
1229 b'sparse-read.density-threshold',
1229 default=0.50,
1230 default=0.50,
1230 )
1231 )
1231 coreconfigitem(
1232 coreconfigitem(
1232 b'experimental',
1233 b'experimental',
1233 b'sparse-read.min-gap-size',
1234 b'sparse-read.min-gap-size',
1234 default=b'65K',
1235 default=b'65K',
1235 )
1236 )
1236 coreconfigitem(
1237 coreconfigitem(
1237 b'experimental',
1238 b'experimental',
1238 b'treemanifest',
1239 b'treemanifest',
1239 default=False,
1240 default=False,
1240 )
1241 )
1241 coreconfigitem(
1242 coreconfigitem(
1242 b'experimental',
1243 b'experimental',
1243 b'update.atomic-file',
1244 b'update.atomic-file',
1244 default=False,
1245 default=False,
1245 )
1246 )
1246 coreconfigitem(
1247 coreconfigitem(
1247 b'experimental',
1248 b'experimental',
1248 b'sshpeer.advertise-v2',
1249 b'sshpeer.advertise-v2',
1249 default=False,
1250 default=False,
1250 )
1251 )
1251 coreconfigitem(
1252 coreconfigitem(
1252 b'experimental',
1253 b'experimental',
1253 b'web.apiserver',
1254 b'web.apiserver',
1254 default=False,
1255 default=False,
1255 )
1256 )
1256 coreconfigitem(
1257 coreconfigitem(
1257 b'experimental',
1258 b'experimental',
1258 b'web.api.http-v2',
1259 b'web.api.http-v2',
1259 default=False,
1260 default=False,
1260 )
1261 )
1261 coreconfigitem(
1262 coreconfigitem(
1262 b'experimental',
1263 b'experimental',
1263 b'web.api.debugreflect',
1264 b'web.api.debugreflect',
1264 default=False,
1265 default=False,
1265 )
1266 )
1266 coreconfigitem(
1267 coreconfigitem(
1267 b'experimental',
1268 b'experimental',
1268 b'worker.wdir-get-thread-safe',
1269 b'worker.wdir-get-thread-safe',
1269 default=False,
1270 default=False,
1270 )
1271 )
1271 coreconfigitem(
1272 coreconfigitem(
1272 b'experimental',
1273 b'experimental',
1273 b'worker.repository-upgrade',
1274 b'worker.repository-upgrade',
1274 default=False,
1275 default=False,
1275 )
1276 )
1276 coreconfigitem(
1277 coreconfigitem(
1277 b'experimental',
1278 b'experimental',
1278 b'xdiff',
1279 b'xdiff',
1279 default=False,
1280 default=False,
1280 )
1281 )
1281 coreconfigitem(
1282 coreconfigitem(
1282 b'extensions',
1283 b'extensions',
1283 b'.*',
1284 b'.*',
1284 default=None,
1285 default=None,
1285 generic=True,
1286 generic=True,
1286 )
1287 )
1287 coreconfigitem(
1288 coreconfigitem(
1288 b'extdata',
1289 b'extdata',
1289 b'.*',
1290 b'.*',
1290 default=None,
1291 default=None,
1291 generic=True,
1292 generic=True,
1292 )
1293 )
1293 coreconfigitem(
1294 coreconfigitem(
1294 b'format',
1295 b'format',
1295 b'bookmarks-in-store',
1296 b'bookmarks-in-store',
1296 default=False,
1297 default=False,
1297 )
1298 )
1298 coreconfigitem(
1299 coreconfigitem(
1299 b'format',
1300 b'format',
1300 b'chunkcachesize',
1301 b'chunkcachesize',
1301 default=None,
1302 default=None,
1302 experimental=True,
1303 experimental=True,
1303 )
1304 )
1304 coreconfigitem(
1305 coreconfigitem(
1305 b'format',
1306 b'format',
1306 b'dotencode',
1307 b'dotencode',
1307 default=True,
1308 default=True,
1308 )
1309 )
1309 coreconfigitem(
1310 coreconfigitem(
1310 b'format',
1311 b'format',
1311 b'generaldelta',
1312 b'generaldelta',
1312 default=False,
1313 default=False,
1313 experimental=True,
1314 experimental=True,
1314 )
1315 )
1315 coreconfigitem(
1316 coreconfigitem(
1316 b'format',
1317 b'format',
1317 b'manifestcachesize',
1318 b'manifestcachesize',
1318 default=None,
1319 default=None,
1319 experimental=True,
1320 experimental=True,
1320 )
1321 )
1321 coreconfigitem(
1322 coreconfigitem(
1322 b'format',
1323 b'format',
1323 b'maxchainlen',
1324 b'maxchainlen',
1324 default=dynamicdefault,
1325 default=dynamicdefault,
1325 experimental=True,
1326 experimental=True,
1326 )
1327 )
1327 coreconfigitem(
1328 coreconfigitem(
1328 b'format',
1329 b'format',
1329 b'obsstore-version',
1330 b'obsstore-version',
1330 default=None,
1331 default=None,
1331 )
1332 )
1332 coreconfigitem(
1333 coreconfigitem(
1333 b'format',
1334 b'format',
1334 b'sparse-revlog',
1335 b'sparse-revlog',
1335 default=True,
1336 default=True,
1336 )
1337 )
1337 coreconfigitem(
1338 coreconfigitem(
1338 b'format',
1339 b'format',
1339 b'revlog-compression',
1340 b'revlog-compression',
1340 default=lambda: [b'zstd', b'zlib'],
1341 default=lambda: [b'zstd', b'zlib'],
1341 alias=[(b'experimental', b'format.compression')],
1342 alias=[(b'experimental', b'format.compression')],
1342 )
1343 )
1343 coreconfigitem(
1344 coreconfigitem(
1344 b'format',
1345 b'format',
1345 b'usefncache',
1346 b'usefncache',
1346 default=True,
1347 default=True,
1347 )
1348 )
1348 coreconfigitem(
1349 coreconfigitem(
1349 b'format',
1350 b'format',
1350 b'usegeneraldelta',
1351 b'usegeneraldelta',
1351 default=True,
1352 default=True,
1352 )
1353 )
1353 coreconfigitem(
1354 coreconfigitem(
1354 b'format',
1355 b'format',
1355 b'usestore',
1356 b'usestore',
1356 default=True,
1357 default=True,
1357 )
1358 )
1358
1359
1359
1360
1360 def _persistent_nodemap_default():
1361 def _persistent_nodemap_default():
1361 """compute `use-persistent-nodemap` default value
1362 """compute `use-persistent-nodemap` default value
1362
1363
1363 The feature is disabled unless a fast implementation is available.
1364 The feature is disabled unless a fast implementation is available.
1364 """
1365 """
1365 from . import policy
1366 from . import policy
1366
1367
1367 return policy.importrust('revlog') is not None
1368 return policy.importrust('revlog') is not None
1368
1369
1369
1370
1370 coreconfigitem(
1371 coreconfigitem(
1371 b'format',
1372 b'format',
1372 b'use-persistent-nodemap',
1373 b'use-persistent-nodemap',
1373 default=_persistent_nodemap_default,
1374 default=_persistent_nodemap_default,
1374 )
1375 )
1375 coreconfigitem(
1376 coreconfigitem(
1376 b'format',
1377 b'format',
1377 b'exp-use-copies-side-data-changeset',
1378 b'exp-use-copies-side-data-changeset',
1378 default=False,
1379 default=False,
1379 experimental=True,
1380 experimental=True,
1380 )
1381 )
1381 coreconfigitem(
1382 coreconfigitem(
1382 b'format',
1383 b'format',
1383 b'use-share-safe',
1384 b'use-share-safe',
1384 default=False,
1385 default=False,
1385 )
1386 )
1386 coreconfigitem(
1387 coreconfigitem(
1387 b'format',
1388 b'format',
1388 b'internal-phase',
1389 b'internal-phase',
1389 default=False,
1390 default=False,
1390 experimental=True,
1391 experimental=True,
1391 )
1392 )
1392 coreconfigitem(
1393 coreconfigitem(
1393 b'fsmonitor',
1394 b'fsmonitor',
1394 b'warn_when_unused',
1395 b'warn_when_unused',
1395 default=True,
1396 default=True,
1396 )
1397 )
1397 coreconfigitem(
1398 coreconfigitem(
1398 b'fsmonitor',
1399 b'fsmonitor',
1399 b'warn_update_file_count',
1400 b'warn_update_file_count',
1400 default=50000,
1401 default=50000,
1401 )
1402 )
1402 coreconfigitem(
1403 coreconfigitem(
1403 b'fsmonitor',
1404 b'fsmonitor',
1404 b'warn_update_file_count_rust',
1405 b'warn_update_file_count_rust',
1405 default=400000,
1406 default=400000,
1406 )
1407 )
1407 coreconfigitem(
1408 coreconfigitem(
1408 b'help',
1409 b'help',
1409 br'hidden-command\..*',
1410 br'hidden-command\..*',
1410 default=False,
1411 default=False,
1411 generic=True,
1412 generic=True,
1412 )
1413 )
1413 coreconfigitem(
1414 coreconfigitem(
1414 b'help',
1415 b'help',
1415 br'hidden-topic\..*',
1416 br'hidden-topic\..*',
1416 default=False,
1417 default=False,
1417 generic=True,
1418 generic=True,
1418 )
1419 )
1419 coreconfigitem(
1420 coreconfigitem(
1420 b'hooks',
1421 b'hooks',
1421 b'[^:]*',
1422 b'[^:]*',
1422 default=dynamicdefault,
1423 default=dynamicdefault,
1423 generic=True,
1424 generic=True,
1424 )
1425 )
1425 coreconfigitem(
1426 coreconfigitem(
1426 b'hooks',
1427 b'hooks',
1427 b'.*:run-with-plain',
1428 b'.*:run-with-plain',
1428 default=True,
1429 default=True,
1429 generic=True,
1430 generic=True,
1430 )
1431 )
1431 coreconfigitem(
1432 coreconfigitem(
1432 b'hgweb-paths',
1433 b'hgweb-paths',
1433 b'.*',
1434 b'.*',
1434 default=list,
1435 default=list,
1435 generic=True,
1436 generic=True,
1436 )
1437 )
1437 coreconfigitem(
1438 coreconfigitem(
1438 b'hostfingerprints',
1439 b'hostfingerprints',
1439 b'.*',
1440 b'.*',
1440 default=list,
1441 default=list,
1441 generic=True,
1442 generic=True,
1442 )
1443 )
1443 coreconfigitem(
1444 coreconfigitem(
1444 b'hostsecurity',
1445 b'hostsecurity',
1445 b'ciphers',
1446 b'ciphers',
1446 default=None,
1447 default=None,
1447 )
1448 )
1448 coreconfigitem(
1449 coreconfigitem(
1449 b'hostsecurity',
1450 b'hostsecurity',
1450 b'minimumprotocol',
1451 b'minimumprotocol',
1451 default=dynamicdefault,
1452 default=dynamicdefault,
1452 )
1453 )
1453 coreconfigitem(
1454 coreconfigitem(
1454 b'hostsecurity',
1455 b'hostsecurity',
1455 b'.*:minimumprotocol$',
1456 b'.*:minimumprotocol$',
1456 default=dynamicdefault,
1457 default=dynamicdefault,
1457 generic=True,
1458 generic=True,
1458 )
1459 )
1459 coreconfigitem(
1460 coreconfigitem(
1460 b'hostsecurity',
1461 b'hostsecurity',
1461 b'.*:ciphers$',
1462 b'.*:ciphers$',
1462 default=dynamicdefault,
1463 default=dynamicdefault,
1463 generic=True,
1464 generic=True,
1464 )
1465 )
1465 coreconfigitem(
1466 coreconfigitem(
1466 b'hostsecurity',
1467 b'hostsecurity',
1467 b'.*:fingerprints$',
1468 b'.*:fingerprints$',
1468 default=list,
1469 default=list,
1469 generic=True,
1470 generic=True,
1470 )
1471 )
1471 coreconfigitem(
1472 coreconfigitem(
1472 b'hostsecurity',
1473 b'hostsecurity',
1473 b'.*:verifycertsfile$',
1474 b'.*:verifycertsfile$',
1474 default=None,
1475 default=None,
1475 generic=True,
1476 generic=True,
1476 )
1477 )
1477
1478
1478 coreconfigitem(
1479 coreconfigitem(
1479 b'http_proxy',
1480 b'http_proxy',
1480 b'always',
1481 b'always',
1481 default=False,
1482 default=False,
1482 )
1483 )
1483 coreconfigitem(
1484 coreconfigitem(
1484 b'http_proxy',
1485 b'http_proxy',
1485 b'host',
1486 b'host',
1486 default=None,
1487 default=None,
1487 )
1488 )
1488 coreconfigitem(
1489 coreconfigitem(
1489 b'http_proxy',
1490 b'http_proxy',
1490 b'no',
1491 b'no',
1491 default=list,
1492 default=list,
1492 )
1493 )
1493 coreconfigitem(
1494 coreconfigitem(
1494 b'http_proxy',
1495 b'http_proxy',
1495 b'passwd',
1496 b'passwd',
1496 default=None,
1497 default=None,
1497 )
1498 )
1498 coreconfigitem(
1499 coreconfigitem(
1499 b'http_proxy',
1500 b'http_proxy',
1500 b'user',
1501 b'user',
1501 default=None,
1502 default=None,
1502 )
1503 )
1503
1504
1504 coreconfigitem(
1505 coreconfigitem(
1505 b'http',
1506 b'http',
1506 b'timeout',
1507 b'timeout',
1507 default=None,
1508 default=None,
1508 )
1509 )
1509
1510
1510 coreconfigitem(
1511 coreconfigitem(
1511 b'logtoprocess',
1512 b'logtoprocess',
1512 b'commandexception',
1513 b'commandexception',
1513 default=None,
1514 default=None,
1514 )
1515 )
1515 coreconfigitem(
1516 coreconfigitem(
1516 b'logtoprocess',
1517 b'logtoprocess',
1517 b'commandfinish',
1518 b'commandfinish',
1518 default=None,
1519 default=None,
1519 )
1520 )
1520 coreconfigitem(
1521 coreconfigitem(
1521 b'logtoprocess',
1522 b'logtoprocess',
1522 b'command',
1523 b'command',
1523 default=None,
1524 default=None,
1524 )
1525 )
1525 coreconfigitem(
1526 coreconfigitem(
1526 b'logtoprocess',
1527 b'logtoprocess',
1527 b'develwarn',
1528 b'develwarn',
1528 default=None,
1529 default=None,
1529 )
1530 )
1530 coreconfigitem(
1531 coreconfigitem(
1531 b'logtoprocess',
1532 b'logtoprocess',
1532 b'uiblocked',
1533 b'uiblocked',
1533 default=None,
1534 default=None,
1534 )
1535 )
1535 coreconfigitem(
1536 coreconfigitem(
1536 b'merge',
1537 b'merge',
1537 b'checkunknown',
1538 b'checkunknown',
1538 default=b'abort',
1539 default=b'abort',
1539 )
1540 )
1540 coreconfigitem(
1541 coreconfigitem(
1541 b'merge',
1542 b'merge',
1542 b'checkignored',
1543 b'checkignored',
1543 default=b'abort',
1544 default=b'abort',
1544 )
1545 )
1545 coreconfigitem(
1546 coreconfigitem(
1546 b'experimental',
1547 b'experimental',
1547 b'merge.checkpathconflicts',
1548 b'merge.checkpathconflicts',
1548 default=False,
1549 default=False,
1549 )
1550 )
1550 coreconfigitem(
1551 coreconfigitem(
1551 b'merge',
1552 b'merge',
1552 b'followcopies',
1553 b'followcopies',
1553 default=True,
1554 default=True,
1554 )
1555 )
1555 coreconfigitem(
1556 coreconfigitem(
1556 b'merge',
1557 b'merge',
1557 b'on-failure',
1558 b'on-failure',
1558 default=b'continue',
1559 default=b'continue',
1559 )
1560 )
1560 coreconfigitem(
1561 coreconfigitem(
1561 b'merge',
1562 b'merge',
1562 b'preferancestor',
1563 b'preferancestor',
1563 default=lambda: [b'*'],
1564 default=lambda: [b'*'],
1564 experimental=True,
1565 experimental=True,
1565 )
1566 )
1566 coreconfigitem(
1567 coreconfigitem(
1567 b'merge',
1568 b'merge',
1568 b'strict-capability-check',
1569 b'strict-capability-check',
1569 default=False,
1570 default=False,
1570 )
1571 )
1571 coreconfigitem(
1572 coreconfigitem(
1572 b'merge-tools',
1573 b'merge-tools',
1573 b'.*',
1574 b'.*',
1574 default=None,
1575 default=None,
1575 generic=True,
1576 generic=True,
1576 )
1577 )
1577 coreconfigitem(
1578 coreconfigitem(
1578 b'merge-tools',
1579 b'merge-tools',
1579 br'.*\.args$',
1580 br'.*\.args$',
1580 default=b"$local $base $other",
1581 default=b"$local $base $other",
1581 generic=True,
1582 generic=True,
1582 priority=-1,
1583 priority=-1,
1583 )
1584 )
1584 coreconfigitem(
1585 coreconfigitem(
1585 b'merge-tools',
1586 b'merge-tools',
1586 br'.*\.binary$',
1587 br'.*\.binary$',
1587 default=False,
1588 default=False,
1588 generic=True,
1589 generic=True,
1589 priority=-1,
1590 priority=-1,
1590 )
1591 )
1591 coreconfigitem(
1592 coreconfigitem(
1592 b'merge-tools',
1593 b'merge-tools',
1593 br'.*\.check$',
1594 br'.*\.check$',
1594 default=list,
1595 default=list,
1595 generic=True,
1596 generic=True,
1596 priority=-1,
1597 priority=-1,
1597 )
1598 )
1598 coreconfigitem(
1599 coreconfigitem(
1599 b'merge-tools',
1600 b'merge-tools',
1600 br'.*\.checkchanged$',
1601 br'.*\.checkchanged$',
1601 default=False,
1602 default=False,
1602 generic=True,
1603 generic=True,
1603 priority=-1,
1604 priority=-1,
1604 )
1605 )
1605 coreconfigitem(
1606 coreconfigitem(
1606 b'merge-tools',
1607 b'merge-tools',
1607 br'.*\.executable$',
1608 br'.*\.executable$',
1608 default=dynamicdefault,
1609 default=dynamicdefault,
1609 generic=True,
1610 generic=True,
1610 priority=-1,
1611 priority=-1,
1611 )
1612 )
1612 coreconfigitem(
1613 coreconfigitem(
1613 b'merge-tools',
1614 b'merge-tools',
1614 br'.*\.fixeol$',
1615 br'.*\.fixeol$',
1615 default=False,
1616 default=False,
1616 generic=True,
1617 generic=True,
1617 priority=-1,
1618 priority=-1,
1618 )
1619 )
1619 coreconfigitem(
1620 coreconfigitem(
1620 b'merge-tools',
1621 b'merge-tools',
1621 br'.*\.gui$',
1622 br'.*\.gui$',
1622 default=False,
1623 default=False,
1623 generic=True,
1624 generic=True,
1624 priority=-1,
1625 priority=-1,
1625 )
1626 )
1626 coreconfigitem(
1627 coreconfigitem(
1627 b'merge-tools',
1628 b'merge-tools',
1628 br'.*\.mergemarkers$',
1629 br'.*\.mergemarkers$',
1629 default=b'basic',
1630 default=b'basic',
1630 generic=True,
1631 generic=True,
1631 priority=-1,
1632 priority=-1,
1632 )
1633 )
1633 coreconfigitem(
1634 coreconfigitem(
1634 b'merge-tools',
1635 b'merge-tools',
1635 br'.*\.mergemarkertemplate$',
1636 br'.*\.mergemarkertemplate$',
1636 default=dynamicdefault, # take from command-templates.mergemarker
1637 default=dynamicdefault, # take from command-templates.mergemarker
1637 generic=True,
1638 generic=True,
1638 priority=-1,
1639 priority=-1,
1639 )
1640 )
1640 coreconfigitem(
1641 coreconfigitem(
1641 b'merge-tools',
1642 b'merge-tools',
1642 br'.*\.priority$',
1643 br'.*\.priority$',
1643 default=0,
1644 default=0,
1644 generic=True,
1645 generic=True,
1645 priority=-1,
1646 priority=-1,
1646 )
1647 )
1647 coreconfigitem(
1648 coreconfigitem(
1648 b'merge-tools',
1649 b'merge-tools',
1649 br'.*\.premerge$',
1650 br'.*\.premerge$',
1650 default=dynamicdefault,
1651 default=dynamicdefault,
1651 generic=True,
1652 generic=True,
1652 priority=-1,
1653 priority=-1,
1653 )
1654 )
1654 coreconfigitem(
1655 coreconfigitem(
1655 b'merge-tools',
1656 b'merge-tools',
1656 br'.*\.symlink$',
1657 br'.*\.symlink$',
1657 default=False,
1658 default=False,
1658 generic=True,
1659 generic=True,
1659 priority=-1,
1660 priority=-1,
1660 )
1661 )
1661 coreconfigitem(
1662 coreconfigitem(
1662 b'pager',
1663 b'pager',
1663 b'attend-.*',
1664 b'attend-.*',
1664 default=dynamicdefault,
1665 default=dynamicdefault,
1665 generic=True,
1666 generic=True,
1666 )
1667 )
1667 coreconfigitem(
1668 coreconfigitem(
1668 b'pager',
1669 b'pager',
1669 b'ignore',
1670 b'ignore',
1670 default=list,
1671 default=list,
1671 )
1672 )
1672 coreconfigitem(
1673 coreconfigitem(
1673 b'pager',
1674 b'pager',
1674 b'pager',
1675 b'pager',
1675 default=dynamicdefault,
1676 default=dynamicdefault,
1676 )
1677 )
1677 coreconfigitem(
1678 coreconfigitem(
1678 b'patch',
1679 b'patch',
1679 b'eol',
1680 b'eol',
1680 default=b'strict',
1681 default=b'strict',
1681 )
1682 )
1682 coreconfigitem(
1683 coreconfigitem(
1683 b'patch',
1684 b'patch',
1684 b'fuzz',
1685 b'fuzz',
1685 default=2,
1686 default=2,
1686 )
1687 )
1687 coreconfigitem(
1688 coreconfigitem(
1688 b'paths',
1689 b'paths',
1689 b'default',
1690 b'default',
1690 default=None,
1691 default=None,
1691 )
1692 )
1692 coreconfigitem(
1693 coreconfigitem(
1693 b'paths',
1694 b'paths',
1694 b'default-push',
1695 b'default-push',
1695 default=None,
1696 default=None,
1696 )
1697 )
1697 coreconfigitem(
1698 coreconfigitem(
1698 b'paths',
1699 b'paths',
1699 b'.*',
1700 b'.*',
1700 default=None,
1701 default=None,
1701 generic=True,
1702 generic=True,
1702 )
1703 )
1703 coreconfigitem(
1704 coreconfigitem(
1704 b'phases',
1705 b'phases',
1705 b'checksubrepos',
1706 b'checksubrepos',
1706 default=b'follow',
1707 default=b'follow',
1707 )
1708 )
1708 coreconfigitem(
1709 coreconfigitem(
1709 b'phases',
1710 b'phases',
1710 b'new-commit',
1711 b'new-commit',
1711 default=b'draft',
1712 default=b'draft',
1712 )
1713 )
1713 coreconfigitem(
1714 coreconfigitem(
1714 b'phases',
1715 b'phases',
1715 b'publish',
1716 b'publish',
1716 default=True,
1717 default=True,
1717 )
1718 )
1718 coreconfigitem(
1719 coreconfigitem(
1719 b'profiling',
1720 b'profiling',
1720 b'enabled',
1721 b'enabled',
1721 default=False,
1722 default=False,
1722 )
1723 )
1723 coreconfigitem(
1724 coreconfigitem(
1724 b'profiling',
1725 b'profiling',
1725 b'format',
1726 b'format',
1726 default=b'text',
1727 default=b'text',
1727 )
1728 )
1728 coreconfigitem(
1729 coreconfigitem(
1729 b'profiling',
1730 b'profiling',
1730 b'freq',
1731 b'freq',
1731 default=1000,
1732 default=1000,
1732 )
1733 )
1733 coreconfigitem(
1734 coreconfigitem(
1734 b'profiling',
1735 b'profiling',
1735 b'limit',
1736 b'limit',
1736 default=30,
1737 default=30,
1737 )
1738 )
1738 coreconfigitem(
1739 coreconfigitem(
1739 b'profiling',
1740 b'profiling',
1740 b'nested',
1741 b'nested',
1741 default=0,
1742 default=0,
1742 )
1743 )
1743 coreconfigitem(
1744 coreconfigitem(
1744 b'profiling',
1745 b'profiling',
1745 b'output',
1746 b'output',
1746 default=None,
1747 default=None,
1747 )
1748 )
1748 coreconfigitem(
1749 coreconfigitem(
1749 b'profiling',
1750 b'profiling',
1750 b'showmax',
1751 b'showmax',
1751 default=0.999,
1752 default=0.999,
1752 )
1753 )
1753 coreconfigitem(
1754 coreconfigitem(
1754 b'profiling',
1755 b'profiling',
1755 b'showmin',
1756 b'showmin',
1756 default=dynamicdefault,
1757 default=dynamicdefault,
1757 )
1758 )
1758 coreconfigitem(
1759 coreconfigitem(
1759 b'profiling',
1760 b'profiling',
1760 b'showtime',
1761 b'showtime',
1761 default=True,
1762 default=True,
1762 )
1763 )
1763 coreconfigitem(
1764 coreconfigitem(
1764 b'profiling',
1765 b'profiling',
1765 b'sort',
1766 b'sort',
1766 default=b'inlinetime',
1767 default=b'inlinetime',
1767 )
1768 )
1768 coreconfigitem(
1769 coreconfigitem(
1769 b'profiling',
1770 b'profiling',
1770 b'statformat',
1771 b'statformat',
1771 default=b'hotpath',
1772 default=b'hotpath',
1772 )
1773 )
1773 coreconfigitem(
1774 coreconfigitem(
1774 b'profiling',
1775 b'profiling',
1775 b'time-track',
1776 b'time-track',
1776 default=dynamicdefault,
1777 default=dynamicdefault,
1777 )
1778 )
1778 coreconfigitem(
1779 coreconfigitem(
1779 b'profiling',
1780 b'profiling',
1780 b'type',
1781 b'type',
1781 default=b'stat',
1782 default=b'stat',
1782 )
1783 )
1783 coreconfigitem(
1784 coreconfigitem(
1784 b'progress',
1785 b'progress',
1785 b'assume-tty',
1786 b'assume-tty',
1786 default=False,
1787 default=False,
1787 )
1788 )
1788 coreconfigitem(
1789 coreconfigitem(
1789 b'progress',
1790 b'progress',
1790 b'changedelay',
1791 b'changedelay',
1791 default=1,
1792 default=1,
1792 )
1793 )
1793 coreconfigitem(
1794 coreconfigitem(
1794 b'progress',
1795 b'progress',
1795 b'clear-complete',
1796 b'clear-complete',
1796 default=True,
1797 default=True,
1797 )
1798 )
1798 coreconfigitem(
1799 coreconfigitem(
1799 b'progress',
1800 b'progress',
1800 b'debug',
1801 b'debug',
1801 default=False,
1802 default=False,
1802 )
1803 )
1803 coreconfigitem(
1804 coreconfigitem(
1804 b'progress',
1805 b'progress',
1805 b'delay',
1806 b'delay',
1806 default=3,
1807 default=3,
1807 )
1808 )
1808 coreconfigitem(
1809 coreconfigitem(
1809 b'progress',
1810 b'progress',
1810 b'disable',
1811 b'disable',
1811 default=False,
1812 default=False,
1812 )
1813 )
1813 coreconfigitem(
1814 coreconfigitem(
1814 b'progress',
1815 b'progress',
1815 b'estimateinterval',
1816 b'estimateinterval',
1816 default=60.0,
1817 default=60.0,
1817 )
1818 )
1818 coreconfigitem(
1819 coreconfigitem(
1819 b'progress',
1820 b'progress',
1820 b'format',
1821 b'format',
1821 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1822 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1822 )
1823 )
1823 coreconfigitem(
1824 coreconfigitem(
1824 b'progress',
1825 b'progress',
1825 b'refresh',
1826 b'refresh',
1826 default=0.1,
1827 default=0.1,
1827 )
1828 )
1828 coreconfigitem(
1829 coreconfigitem(
1829 b'progress',
1830 b'progress',
1830 b'width',
1831 b'width',
1831 default=dynamicdefault,
1832 default=dynamicdefault,
1832 )
1833 )
1833 coreconfigitem(
1834 coreconfigitem(
1834 b'pull',
1835 b'pull',
1835 b'confirm',
1836 b'confirm',
1836 default=False,
1837 default=False,
1837 )
1838 )
1838 coreconfigitem(
1839 coreconfigitem(
1839 b'push',
1840 b'push',
1840 b'pushvars.server',
1841 b'pushvars.server',
1841 default=False,
1842 default=False,
1842 )
1843 )
1843 coreconfigitem(
1844 coreconfigitem(
1844 b'rewrite',
1845 b'rewrite',
1845 b'backup-bundle',
1846 b'backup-bundle',
1846 default=True,
1847 default=True,
1847 alias=[(b'ui', b'history-editing-backup')],
1848 alias=[(b'ui', b'history-editing-backup')],
1848 )
1849 )
1849 coreconfigitem(
1850 coreconfigitem(
1850 b'rewrite',
1851 b'rewrite',
1851 b'update-timestamp',
1852 b'update-timestamp',
1852 default=False,
1853 default=False,
1853 )
1854 )
1854 coreconfigitem(
1855 coreconfigitem(
1855 b'rewrite',
1856 b'rewrite',
1856 b'empty-successor',
1857 b'empty-successor',
1857 default=b'skip',
1858 default=b'skip',
1858 experimental=True,
1859 experimental=True,
1859 )
1860 )
1860 coreconfigitem(
1861 coreconfigitem(
1861 b'storage',
1862 b'storage',
1862 b'new-repo-backend',
1863 b'new-repo-backend',
1863 default=b'revlogv1',
1864 default=b'revlogv1',
1864 experimental=True,
1865 experimental=True,
1865 )
1866 )
1866 coreconfigitem(
1867 coreconfigitem(
1867 b'storage',
1868 b'storage',
1868 b'revlog.optimize-delta-parent-choice',
1869 b'revlog.optimize-delta-parent-choice',
1869 default=True,
1870 default=True,
1870 alias=[(b'format', b'aggressivemergedeltas')],
1871 alias=[(b'format', b'aggressivemergedeltas')],
1871 )
1872 )
1872 # experimental as long as rust is experimental (or a C version is implemented)
1873 # experimental as long as rust is experimental (or a C version is implemented)
1873 coreconfigitem(
1874 coreconfigitem(
1874 b'storage',
1875 b'storage',
1875 b'revlog.persistent-nodemap.mmap',
1876 b'revlog.persistent-nodemap.mmap',
1876 default=True,
1877 default=True,
1877 )
1878 )
1878 # experimental as long as format.use-persistent-nodemap is.
1879 # experimental as long as format.use-persistent-nodemap is.
1879 coreconfigitem(
1880 coreconfigitem(
1880 b'storage',
1881 b'storage',
1881 b'revlog.persistent-nodemap.slow-path',
1882 b'revlog.persistent-nodemap.slow-path',
1882 default=b"abort",
1883 default=b"abort",
1883 )
1884 )
1884
1885
1885 coreconfigitem(
1886 coreconfigitem(
1886 b'storage',
1887 b'storage',
1887 b'revlog.reuse-external-delta',
1888 b'revlog.reuse-external-delta',
1888 default=True,
1889 default=True,
1889 )
1890 )
1890 coreconfigitem(
1891 coreconfigitem(
1891 b'storage',
1892 b'storage',
1892 b'revlog.reuse-external-delta-parent',
1893 b'revlog.reuse-external-delta-parent',
1893 default=None,
1894 default=None,
1894 )
1895 )
1895 coreconfigitem(
1896 coreconfigitem(
1896 b'storage',
1897 b'storage',
1897 b'revlog.zlib.level',
1898 b'revlog.zlib.level',
1898 default=None,
1899 default=None,
1899 )
1900 )
1900 coreconfigitem(
1901 coreconfigitem(
1901 b'storage',
1902 b'storage',
1902 b'revlog.zstd.level',
1903 b'revlog.zstd.level',
1903 default=None,
1904 default=None,
1904 )
1905 )
1905 coreconfigitem(
1906 coreconfigitem(
1906 b'server',
1907 b'server',
1907 b'bookmarks-pushkey-compat',
1908 b'bookmarks-pushkey-compat',
1908 default=True,
1909 default=True,
1909 )
1910 )
1910 coreconfigitem(
1911 coreconfigitem(
1911 b'server',
1912 b'server',
1912 b'bundle1',
1913 b'bundle1',
1913 default=True,
1914 default=True,
1914 )
1915 )
1915 coreconfigitem(
1916 coreconfigitem(
1916 b'server',
1917 b'server',
1917 b'bundle1gd',
1918 b'bundle1gd',
1918 default=None,
1919 default=None,
1919 )
1920 )
1920 coreconfigitem(
1921 coreconfigitem(
1921 b'server',
1922 b'server',
1922 b'bundle1.pull',
1923 b'bundle1.pull',
1923 default=None,
1924 default=None,
1924 )
1925 )
1925 coreconfigitem(
1926 coreconfigitem(
1926 b'server',
1927 b'server',
1927 b'bundle1gd.pull',
1928 b'bundle1gd.pull',
1928 default=None,
1929 default=None,
1929 )
1930 )
1930 coreconfigitem(
1931 coreconfigitem(
1931 b'server',
1932 b'server',
1932 b'bundle1.push',
1933 b'bundle1.push',
1933 default=None,
1934 default=None,
1934 )
1935 )
1935 coreconfigitem(
1936 coreconfigitem(
1936 b'server',
1937 b'server',
1937 b'bundle1gd.push',
1938 b'bundle1gd.push',
1938 default=None,
1939 default=None,
1939 )
1940 )
1940 coreconfigitem(
1941 coreconfigitem(
1941 b'server',
1942 b'server',
1942 b'bundle2.stream',
1943 b'bundle2.stream',
1943 default=True,
1944 default=True,
1944 alias=[(b'experimental', b'bundle2.stream')],
1945 alias=[(b'experimental', b'bundle2.stream')],
1945 )
1946 )
1946 coreconfigitem(
1947 coreconfigitem(
1947 b'server',
1948 b'server',
1948 b'compressionengines',
1949 b'compressionengines',
1949 default=list,
1950 default=list,
1950 )
1951 )
1951 coreconfigitem(
1952 coreconfigitem(
1952 b'server',
1953 b'server',
1953 b'concurrent-push-mode',
1954 b'concurrent-push-mode',
1954 default=b'check-related',
1955 default=b'check-related',
1955 )
1956 )
1956 coreconfigitem(
1957 coreconfigitem(
1957 b'server',
1958 b'server',
1958 b'disablefullbundle',
1959 b'disablefullbundle',
1959 default=False,
1960 default=False,
1960 )
1961 )
1961 coreconfigitem(
1962 coreconfigitem(
1962 b'server',
1963 b'server',
1963 b'maxhttpheaderlen',
1964 b'maxhttpheaderlen',
1964 default=1024,
1965 default=1024,
1965 )
1966 )
1966 coreconfigitem(
1967 coreconfigitem(
1967 b'server',
1968 b'server',
1968 b'pullbundle',
1969 b'pullbundle',
1969 default=False,
1970 default=False,
1970 )
1971 )
1971 coreconfigitem(
1972 coreconfigitem(
1972 b'server',
1973 b'server',
1973 b'preferuncompressed',
1974 b'preferuncompressed',
1974 default=False,
1975 default=False,
1975 )
1976 )
1976 coreconfigitem(
1977 coreconfigitem(
1977 b'server',
1978 b'server',
1978 b'streamunbundle',
1979 b'streamunbundle',
1979 default=False,
1980 default=False,
1980 )
1981 )
1981 coreconfigitem(
1982 coreconfigitem(
1982 b'server',
1983 b'server',
1983 b'uncompressed',
1984 b'uncompressed',
1984 default=True,
1985 default=True,
1985 )
1986 )
1986 coreconfigitem(
1987 coreconfigitem(
1987 b'server',
1988 b'server',
1988 b'uncompressedallowsecret',
1989 b'uncompressedallowsecret',
1989 default=False,
1990 default=False,
1990 )
1991 )
1991 coreconfigitem(
1992 coreconfigitem(
1992 b'server',
1993 b'server',
1993 b'view',
1994 b'view',
1994 default=b'served',
1995 default=b'served',
1995 )
1996 )
1996 coreconfigitem(
1997 coreconfigitem(
1997 b'server',
1998 b'server',
1998 b'validate',
1999 b'validate',
1999 default=False,
2000 default=False,
2000 )
2001 )
2001 coreconfigitem(
2002 coreconfigitem(
2002 b'server',
2003 b'server',
2003 b'zliblevel',
2004 b'zliblevel',
2004 default=-1,
2005 default=-1,
2005 )
2006 )
2006 coreconfigitem(
2007 coreconfigitem(
2007 b'server',
2008 b'server',
2008 b'zstdlevel',
2009 b'zstdlevel',
2009 default=3,
2010 default=3,
2010 )
2011 )
2011 coreconfigitem(
2012 coreconfigitem(
2012 b'share',
2013 b'share',
2013 b'pool',
2014 b'pool',
2014 default=None,
2015 default=None,
2015 )
2016 )
2016 coreconfigitem(
2017 coreconfigitem(
2017 b'share',
2018 b'share',
2018 b'poolnaming',
2019 b'poolnaming',
2019 default=b'identity',
2020 default=b'identity',
2020 )
2021 )
2021 coreconfigitem(
2022 coreconfigitem(
2022 b'share',
2023 b'share',
2023 b'safe-mismatch.source-not-safe',
2024 b'safe-mismatch.source-not-safe',
2024 default=b'abort',
2025 default=b'abort',
2025 )
2026 )
2026 coreconfigitem(
2027 coreconfigitem(
2027 b'share',
2028 b'share',
2028 b'safe-mismatch.source-safe',
2029 b'safe-mismatch.source-safe',
2029 default=b'abort',
2030 default=b'abort',
2030 )
2031 )
2031 coreconfigitem(
2032 coreconfigitem(
2032 b'share',
2033 b'share',
2033 b'safe-mismatch.source-not-safe.warn',
2034 b'safe-mismatch.source-not-safe.warn',
2034 default=True,
2035 default=True,
2035 )
2036 )
2036 coreconfigitem(
2037 coreconfigitem(
2037 b'share',
2038 b'share',
2038 b'safe-mismatch.source-safe.warn',
2039 b'safe-mismatch.source-safe.warn',
2039 default=True,
2040 default=True,
2040 )
2041 )
2041 coreconfigitem(
2042 coreconfigitem(
2042 b'shelve',
2043 b'shelve',
2043 b'maxbackups',
2044 b'maxbackups',
2044 default=10,
2045 default=10,
2045 )
2046 )
2046 coreconfigitem(
2047 coreconfigitem(
2047 b'smtp',
2048 b'smtp',
2048 b'host',
2049 b'host',
2049 default=None,
2050 default=None,
2050 )
2051 )
2051 coreconfigitem(
2052 coreconfigitem(
2052 b'smtp',
2053 b'smtp',
2053 b'local_hostname',
2054 b'local_hostname',
2054 default=None,
2055 default=None,
2055 )
2056 )
2056 coreconfigitem(
2057 coreconfigitem(
2057 b'smtp',
2058 b'smtp',
2058 b'password',
2059 b'password',
2059 default=None,
2060 default=None,
2060 )
2061 )
2061 coreconfigitem(
2062 coreconfigitem(
2062 b'smtp',
2063 b'smtp',
2063 b'port',
2064 b'port',
2064 default=dynamicdefault,
2065 default=dynamicdefault,
2065 )
2066 )
2066 coreconfigitem(
2067 coreconfigitem(
2067 b'smtp',
2068 b'smtp',
2068 b'tls',
2069 b'tls',
2069 default=b'none',
2070 default=b'none',
2070 )
2071 )
2071 coreconfigitem(
2072 coreconfigitem(
2072 b'smtp',
2073 b'smtp',
2073 b'username',
2074 b'username',
2074 default=None,
2075 default=None,
2075 )
2076 )
2076 coreconfigitem(
2077 coreconfigitem(
2077 b'sparse',
2078 b'sparse',
2078 b'missingwarning',
2079 b'missingwarning',
2079 default=True,
2080 default=True,
2080 experimental=True,
2081 experimental=True,
2081 )
2082 )
2082 coreconfigitem(
2083 coreconfigitem(
2083 b'subrepos',
2084 b'subrepos',
2084 b'allowed',
2085 b'allowed',
2085 default=dynamicdefault, # to make backporting simpler
2086 default=dynamicdefault, # to make backporting simpler
2086 )
2087 )
2087 coreconfigitem(
2088 coreconfigitem(
2088 b'subrepos',
2089 b'subrepos',
2089 b'hg:allowed',
2090 b'hg:allowed',
2090 default=dynamicdefault,
2091 default=dynamicdefault,
2091 )
2092 )
2092 coreconfigitem(
2093 coreconfigitem(
2093 b'subrepos',
2094 b'subrepos',
2094 b'git:allowed',
2095 b'git:allowed',
2095 default=dynamicdefault,
2096 default=dynamicdefault,
2096 )
2097 )
2097 coreconfigitem(
2098 coreconfigitem(
2098 b'subrepos',
2099 b'subrepos',
2099 b'svn:allowed',
2100 b'svn:allowed',
2100 default=dynamicdefault,
2101 default=dynamicdefault,
2101 )
2102 )
2102 coreconfigitem(
2103 coreconfigitem(
2103 b'templates',
2104 b'templates',
2104 b'.*',
2105 b'.*',
2105 default=None,
2106 default=None,
2106 generic=True,
2107 generic=True,
2107 )
2108 )
2108 coreconfigitem(
2109 coreconfigitem(
2109 b'templateconfig',
2110 b'templateconfig',
2110 b'.*',
2111 b'.*',
2111 default=dynamicdefault,
2112 default=dynamicdefault,
2112 generic=True,
2113 generic=True,
2113 )
2114 )
2114 coreconfigitem(
2115 coreconfigitem(
2115 b'trusted',
2116 b'trusted',
2116 b'groups',
2117 b'groups',
2117 default=list,
2118 default=list,
2118 )
2119 )
2119 coreconfigitem(
2120 coreconfigitem(
2120 b'trusted',
2121 b'trusted',
2121 b'users',
2122 b'users',
2122 default=list,
2123 default=list,
2123 )
2124 )
2124 coreconfigitem(
2125 coreconfigitem(
2125 b'ui',
2126 b'ui',
2126 b'_usedassubrepo',
2127 b'_usedassubrepo',
2127 default=False,
2128 default=False,
2128 )
2129 )
2129 coreconfigitem(
2130 coreconfigitem(
2130 b'ui',
2131 b'ui',
2131 b'allowemptycommit',
2132 b'allowemptycommit',
2132 default=False,
2133 default=False,
2133 )
2134 )
2134 coreconfigitem(
2135 coreconfigitem(
2135 b'ui',
2136 b'ui',
2136 b'archivemeta',
2137 b'archivemeta',
2137 default=True,
2138 default=True,
2138 )
2139 )
2139 coreconfigitem(
2140 coreconfigitem(
2140 b'ui',
2141 b'ui',
2141 b'askusername',
2142 b'askusername',
2142 default=False,
2143 default=False,
2143 )
2144 )
2144 coreconfigitem(
2145 coreconfigitem(
2145 b'ui',
2146 b'ui',
2146 b'available-memory',
2147 b'available-memory',
2147 default=None,
2148 default=None,
2148 )
2149 )
2149
2150
2150 coreconfigitem(
2151 coreconfigitem(
2151 b'ui',
2152 b'ui',
2152 b'clonebundlefallback',
2153 b'clonebundlefallback',
2153 default=False,
2154 default=False,
2154 )
2155 )
2155 coreconfigitem(
2156 coreconfigitem(
2156 b'ui',
2157 b'ui',
2157 b'clonebundleprefers',
2158 b'clonebundleprefers',
2158 default=list,
2159 default=list,
2159 )
2160 )
2160 coreconfigitem(
2161 coreconfigitem(
2161 b'ui',
2162 b'ui',
2162 b'clonebundles',
2163 b'clonebundles',
2163 default=True,
2164 default=True,
2164 )
2165 )
2165 coreconfigitem(
2166 coreconfigitem(
2166 b'ui',
2167 b'ui',
2167 b'color',
2168 b'color',
2168 default=b'auto',
2169 default=b'auto',
2169 )
2170 )
2170 coreconfigitem(
2171 coreconfigitem(
2171 b'ui',
2172 b'ui',
2172 b'commitsubrepos',
2173 b'commitsubrepos',
2173 default=False,
2174 default=False,
2174 )
2175 )
2175 coreconfigitem(
2176 coreconfigitem(
2176 b'ui',
2177 b'ui',
2177 b'debug',
2178 b'debug',
2178 default=False,
2179 default=False,
2179 )
2180 )
2180 coreconfigitem(
2181 coreconfigitem(
2181 b'ui',
2182 b'ui',
2182 b'debugger',
2183 b'debugger',
2183 default=None,
2184 default=None,
2184 )
2185 )
2185 coreconfigitem(
2186 coreconfigitem(
2186 b'ui',
2187 b'ui',
2187 b'editor',
2188 b'editor',
2188 default=dynamicdefault,
2189 default=dynamicdefault,
2189 )
2190 )
2190 coreconfigitem(
2191 coreconfigitem(
2191 b'ui',
2192 b'ui',
2192 b'detailed-exit-code',
2193 b'detailed-exit-code',
2193 default=False,
2194 default=False,
2194 experimental=True,
2195 experimental=True,
2195 )
2196 )
2196 coreconfigitem(
2197 coreconfigitem(
2197 b'ui',
2198 b'ui',
2198 b'fallbackencoding',
2199 b'fallbackencoding',
2199 default=None,
2200 default=None,
2200 )
2201 )
2201 coreconfigitem(
2202 coreconfigitem(
2202 b'ui',
2203 b'ui',
2203 b'forcecwd',
2204 b'forcecwd',
2204 default=None,
2205 default=None,
2205 )
2206 )
2206 coreconfigitem(
2207 coreconfigitem(
2207 b'ui',
2208 b'ui',
2208 b'forcemerge',
2209 b'forcemerge',
2209 default=None,
2210 default=None,
2210 )
2211 )
2211 coreconfigitem(
2212 coreconfigitem(
2212 b'ui',
2213 b'ui',
2213 b'formatdebug',
2214 b'formatdebug',
2214 default=False,
2215 default=False,
2215 )
2216 )
2216 coreconfigitem(
2217 coreconfigitem(
2217 b'ui',
2218 b'ui',
2218 b'formatjson',
2219 b'formatjson',
2219 default=False,
2220 default=False,
2220 )
2221 )
2221 coreconfigitem(
2222 coreconfigitem(
2222 b'ui',
2223 b'ui',
2223 b'formatted',
2224 b'formatted',
2224 default=None,
2225 default=None,
2225 )
2226 )
2226 coreconfigitem(
2227 coreconfigitem(
2227 b'ui',
2228 b'ui',
2228 b'interactive',
2229 b'interactive',
2229 default=None,
2230 default=None,
2230 )
2231 )
2231 coreconfigitem(
2232 coreconfigitem(
2232 b'ui',
2233 b'ui',
2233 b'interface',
2234 b'interface',
2234 default=None,
2235 default=None,
2235 )
2236 )
2236 coreconfigitem(
2237 coreconfigitem(
2237 b'ui',
2238 b'ui',
2238 b'interface.chunkselector',
2239 b'interface.chunkselector',
2239 default=None,
2240 default=None,
2240 )
2241 )
2241 coreconfigitem(
2242 coreconfigitem(
2242 b'ui',
2243 b'ui',
2243 b'large-file-limit',
2244 b'large-file-limit',
2244 default=10000000,
2245 default=10000000,
2245 )
2246 )
2246 coreconfigitem(
2247 coreconfigitem(
2247 b'ui',
2248 b'ui',
2248 b'logblockedtimes',
2249 b'logblockedtimes',
2249 default=False,
2250 default=False,
2250 )
2251 )
2251 coreconfigitem(
2252 coreconfigitem(
2252 b'ui',
2253 b'ui',
2253 b'merge',
2254 b'merge',
2254 default=None,
2255 default=None,
2255 )
2256 )
2256 coreconfigitem(
2257 coreconfigitem(
2257 b'ui',
2258 b'ui',
2258 b'mergemarkers',
2259 b'mergemarkers',
2259 default=b'basic',
2260 default=b'basic',
2260 )
2261 )
2261 coreconfigitem(
2262 coreconfigitem(
2262 b'ui',
2263 b'ui',
2263 b'message-output',
2264 b'message-output',
2264 default=b'stdio',
2265 default=b'stdio',
2265 )
2266 )
2266 coreconfigitem(
2267 coreconfigitem(
2267 b'ui',
2268 b'ui',
2268 b'nontty',
2269 b'nontty',
2269 default=False,
2270 default=False,
2270 )
2271 )
2271 coreconfigitem(
2272 coreconfigitem(
2272 b'ui',
2273 b'ui',
2273 b'origbackuppath',
2274 b'origbackuppath',
2274 default=None,
2275 default=None,
2275 )
2276 )
2276 coreconfigitem(
2277 coreconfigitem(
2277 b'ui',
2278 b'ui',
2278 b'paginate',
2279 b'paginate',
2279 default=True,
2280 default=True,
2280 )
2281 )
2281 coreconfigitem(
2282 coreconfigitem(
2282 b'ui',
2283 b'ui',
2283 b'patch',
2284 b'patch',
2284 default=None,
2285 default=None,
2285 )
2286 )
2286 coreconfigitem(
2287 coreconfigitem(
2287 b'ui',
2288 b'ui',
2288 b'portablefilenames',
2289 b'portablefilenames',
2289 default=b'warn',
2290 default=b'warn',
2290 )
2291 )
2291 coreconfigitem(
2292 coreconfigitem(
2292 b'ui',
2293 b'ui',
2293 b'promptecho',
2294 b'promptecho',
2294 default=False,
2295 default=False,
2295 )
2296 )
2296 coreconfigitem(
2297 coreconfigitem(
2297 b'ui',
2298 b'ui',
2298 b'quiet',
2299 b'quiet',
2299 default=False,
2300 default=False,
2300 )
2301 )
2301 coreconfigitem(
2302 coreconfigitem(
2302 b'ui',
2303 b'ui',
2303 b'quietbookmarkmove',
2304 b'quietbookmarkmove',
2304 default=False,
2305 default=False,
2305 )
2306 )
2306 coreconfigitem(
2307 coreconfigitem(
2307 b'ui',
2308 b'ui',
2308 b'relative-paths',
2309 b'relative-paths',
2309 default=b'legacy',
2310 default=b'legacy',
2310 )
2311 )
2311 coreconfigitem(
2312 coreconfigitem(
2312 b'ui',
2313 b'ui',
2313 b'remotecmd',
2314 b'remotecmd',
2314 default=b'hg',
2315 default=b'hg',
2315 )
2316 )
2316 coreconfigitem(
2317 coreconfigitem(
2317 b'ui',
2318 b'ui',
2318 b'report_untrusted',
2319 b'report_untrusted',
2319 default=True,
2320 default=True,
2320 )
2321 )
2321 coreconfigitem(
2322 coreconfigitem(
2322 b'ui',
2323 b'ui',
2323 b'rollback',
2324 b'rollback',
2324 default=True,
2325 default=True,
2325 )
2326 )
2326 coreconfigitem(
2327 coreconfigitem(
2327 b'ui',
2328 b'ui',
2328 b'signal-safe-lock',
2329 b'signal-safe-lock',
2329 default=True,
2330 default=True,
2330 )
2331 )
2331 coreconfigitem(
2332 coreconfigitem(
2332 b'ui',
2333 b'ui',
2333 b'slash',
2334 b'slash',
2334 default=False,
2335 default=False,
2335 )
2336 )
2336 coreconfigitem(
2337 coreconfigitem(
2337 b'ui',
2338 b'ui',
2338 b'ssh',
2339 b'ssh',
2339 default=b'ssh',
2340 default=b'ssh',
2340 )
2341 )
2341 coreconfigitem(
2342 coreconfigitem(
2342 b'ui',
2343 b'ui',
2343 b'ssherrorhint',
2344 b'ssherrorhint',
2344 default=None,
2345 default=None,
2345 )
2346 )
2346 coreconfigitem(
2347 coreconfigitem(
2347 b'ui',
2348 b'ui',
2348 b'statuscopies',
2349 b'statuscopies',
2349 default=False,
2350 default=False,
2350 )
2351 )
2351 coreconfigitem(
2352 coreconfigitem(
2352 b'ui',
2353 b'ui',
2353 b'strict',
2354 b'strict',
2354 default=False,
2355 default=False,
2355 )
2356 )
2356 coreconfigitem(
2357 coreconfigitem(
2357 b'ui',
2358 b'ui',
2358 b'style',
2359 b'style',
2359 default=b'',
2360 default=b'',
2360 )
2361 )
2361 coreconfigitem(
2362 coreconfigitem(
2362 b'ui',
2363 b'ui',
2363 b'supportcontact',
2364 b'supportcontact',
2364 default=None,
2365 default=None,
2365 )
2366 )
2366 coreconfigitem(
2367 coreconfigitem(
2367 b'ui',
2368 b'ui',
2368 b'textwidth',
2369 b'textwidth',
2369 default=78,
2370 default=78,
2370 )
2371 )
2371 coreconfigitem(
2372 coreconfigitem(
2372 b'ui',
2373 b'ui',
2373 b'timeout',
2374 b'timeout',
2374 default=b'600',
2375 default=b'600',
2375 )
2376 )
2376 coreconfigitem(
2377 coreconfigitem(
2377 b'ui',
2378 b'ui',
2378 b'timeout.warn',
2379 b'timeout.warn',
2379 default=0,
2380 default=0,
2380 )
2381 )
2381 coreconfigitem(
2382 coreconfigitem(
2382 b'ui',
2383 b'ui',
2383 b'timestamp-output',
2384 b'timestamp-output',
2384 default=False,
2385 default=False,
2385 )
2386 )
2386 coreconfigitem(
2387 coreconfigitem(
2387 b'ui',
2388 b'ui',
2388 b'traceback',
2389 b'traceback',
2389 default=False,
2390 default=False,
2390 )
2391 )
2391 coreconfigitem(
2392 coreconfigitem(
2392 b'ui',
2393 b'ui',
2393 b'tweakdefaults',
2394 b'tweakdefaults',
2394 default=False,
2395 default=False,
2395 )
2396 )
# Reconstructed from a duplicated diff render: each statement appeared twice
# with stale line-number prefixes; both copies agreed, restored once.
# Declarations for the 'ui' tail, 'verify' and 'web' config sections.
coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
coreconfigitem(
    b'ui',
    b'verbose',
    default=False,
)
coreconfigitem(
    b'verify',
    b'skipflags',
    default=None,
)
coreconfigitem(
    b'web',
    b'allowbz2',
    default=False,
)
coreconfigitem(
    b'web',
    b'allowgz',
    default=False,
)
coreconfigitem(
    b'web',
    b'allow-pull',
    alias=[(b'web', b'allowpull')],
    default=True,
)
coreconfigitem(
    b'web',
    b'allow-push',
    alias=[(b'web', b'allow_push')],
    default=list,
)
coreconfigitem(
    b'web',
    b'allowzip',
    default=False,
)
coreconfigitem(
    b'web',
    b'archivesubrepos',
    default=False,
)
coreconfigitem(
    b'web',
    b'cache',
    default=True,
)
coreconfigitem(
    b'web',
    b'comparisoncontext',
    default=5,
)
coreconfigitem(
    b'web',
    b'contact',
    default=None,
)
coreconfigitem(
    b'web',
    b'deny_push',
    default=list,
)
coreconfigitem(
    b'web',
    b'guessmime',
    default=False,
)
coreconfigitem(
    b'web',
    b'hidden',
    default=False,
)
coreconfigitem(
    b'web',
    b'labels',
    default=list,
)
coreconfigitem(
    b'web',
    b'logoimg',
    default=b'hglogo.png',
)
coreconfigitem(
    b'web',
    b'logourl',
    default=b'https://mercurial-scm.org/',
)
coreconfigitem(
    b'web',
    b'accesslog',
    default=b'-',
)
coreconfigitem(
    b'web',
    b'address',
    default=b'',
)
coreconfigitem(
    b'web',
    b'allow-archive',
    alias=[(b'web', b'allow_archive')],
    default=list,
)
coreconfigitem(
    b'web',
    b'allow_read',
    default=list,
)
coreconfigitem(
    b'web',
    b'baseurl',
    default=None,
)
coreconfigitem(
    b'web',
    b'cacerts',
    default=None,
)
coreconfigitem(
    b'web',
    b'certificate',
    default=None,
)
coreconfigitem(
    b'web',
    b'collapse',
    default=False,
)
coreconfigitem(
    b'web',
    b'csp',
    default=None,
)
coreconfigitem(
    b'web',
    b'deny_read',
    default=list,
)
coreconfigitem(
    b'web',
    b'descend',
    default=True,
)
coreconfigitem(
    b'web',
    b'description',
    default=b"",
)
coreconfigitem(
    b'web',
    b'encoding',
    # lazy default: pick up the process-wide encoding at lookup time
    default=lambda: encoding.encoding,
)
coreconfigitem(
    b'web',
    b'errorlog',
    default=b'-',
)
coreconfigitem(
    b'web',
    b'ipv6',
    default=False,
)
coreconfigitem(
    b'web',
    b'maxchanges',
    default=10,
)
coreconfigitem(
    b'web',
    b'maxfiles',
    default=10,
)
coreconfigitem(
    b'web',
    b'maxshortchanges',
    default=60,
)
coreconfigitem(
    b'web',
    b'motd',
    default=b'',
)
coreconfigitem(
    b'web',
    b'name',
    default=dynamicdefault,
)
coreconfigitem(
    b'web',
    b'port',
    default=8000,
)
coreconfigitem(
    b'web',
    b'prefix',
    default=b'',
)
coreconfigitem(
    b'web',
    b'push_ssl',
    default=True,
)
coreconfigitem(
    b'web',
    b'refreshinterval',
    default=20,
)
coreconfigitem(
    b'web',
    b'server-header',
    default=None,
)
coreconfigitem(
    b'web',
    b'static',
    default=None,
)
coreconfigitem(
    b'web',
    b'staticurl',
    default=None,
)
coreconfigitem(
    b'web',
    b'stripes',
    default=1,
)
coreconfigitem(
    b'web',
    b'style',
    default=b'paper',
)
coreconfigitem(
    b'web',
    b'templates',
    default=None,
)
coreconfigitem(
    b'web',
    b'view',
    default=b'served',
    experimental=True,
)
# Reconstructed from a duplicated diff render (each line appeared twice with
# stale line-number prefixes). Declarations for the 'worker' config section.
coreconfigitem(
    b'worker',
    b'backgroundclose',
    default=dynamicdefault,
)
# Windows defaults to a limit of 512 open files. A buffer of 128
# should give us enough headway.
coreconfigitem(
    b'worker',
    b'backgroundclosemaxqueue',
    default=384,
)
coreconfigitem(
    b'worker',
    b'backgroundcloseminfilecount',
    default=2048,
)
coreconfigitem(
    b'worker',
    b'backgroundclosethreadcount',
    default=4,
)
coreconfigitem(
    b'worker',
    b'enabled',
    default=True,
)
coreconfigitem(
    b'worker',
    b'numcpus',
    default=None,
)
2673
2674
# Rebase related configuration moved to core because other extensions are
# doing strange things. For example, shelve imports the extension to reuse
# some bits without formally loading it.
coreconfigitem(
    b'commands',
    b'rebase.requiredest',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'rebaseskipobsolete',
    default=True,
)
coreconfigitem(
    b'rebase',
    b'singletransaction',
    default=False,
)
coreconfigitem(
    b'rebase',
    b'experimental.inmemory',
    default=False,
)
@@ -1,3360 +1,3364
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import binascii
16 import binascii
17 import collections
17 import collections
18 import contextlib
18 import contextlib
19 import errno
19 import errno
20 import io
20 import io
21 import os
21 import os
22 import struct
22 import struct
23 import zlib
23 import zlib
24
24
25 # import stuff from node for others to import from revlog
25 # import stuff from node for others to import from revlog
26 from .node import (
26 from .node import (
27 bin,
27 bin,
28 hex,
28 hex,
29 nullrev,
29 nullrev,
30 sha1nodeconstants,
30 sha1nodeconstants,
31 short,
31 short,
32 wdirrev,
32 wdirrev,
33 )
33 )
34 from .i18n import _
34 from .i18n import _
35 from .pycompat import getattr
35 from .pycompat import getattr
36 from .revlogutils.constants import (
36 from .revlogutils.constants import (
37 ALL_KINDS,
37 ALL_KINDS,
38 COMP_MODE_INLINE,
38 COMP_MODE_INLINE,
39 COMP_MODE_PLAIN,
39 COMP_MODE_PLAIN,
40 FEATURES_BY_VERSION,
40 FEATURES_BY_VERSION,
41 FLAG_GENERALDELTA,
41 FLAG_GENERALDELTA,
42 FLAG_INLINE_DATA,
42 FLAG_INLINE_DATA,
43 INDEX_HEADER,
43 INDEX_HEADER,
44 REVLOGV0,
44 REVLOGV0,
45 REVLOGV1,
45 REVLOGV1,
46 REVLOGV1_FLAGS,
46 REVLOGV1_FLAGS,
47 REVLOGV2,
47 REVLOGV2,
48 REVLOGV2_FLAGS,
48 REVLOGV2_FLAGS,
49 REVLOG_DEFAULT_FLAGS,
49 REVLOG_DEFAULT_FLAGS,
50 REVLOG_DEFAULT_FORMAT,
50 REVLOG_DEFAULT_FORMAT,
51 REVLOG_DEFAULT_VERSION,
51 REVLOG_DEFAULT_VERSION,
52 SUPPORTED_FLAGS,
52 SUPPORTED_FLAGS,
53 )
53 )
54 from .revlogutils.flagutil import (
54 from .revlogutils.flagutil import (
55 REVIDX_DEFAULT_FLAGS,
55 REVIDX_DEFAULT_FLAGS,
56 REVIDX_ELLIPSIS,
56 REVIDX_ELLIPSIS,
57 REVIDX_EXTSTORED,
57 REVIDX_EXTSTORED,
58 REVIDX_FLAGS_ORDER,
58 REVIDX_FLAGS_ORDER,
59 REVIDX_HASCOPIESINFO,
59 REVIDX_HASCOPIESINFO,
60 REVIDX_ISCENSORED,
60 REVIDX_ISCENSORED,
61 REVIDX_RAWTEXT_CHANGING_FLAGS,
61 REVIDX_RAWTEXT_CHANGING_FLAGS,
62 )
62 )
63 from .thirdparty import attr
63 from .thirdparty import attr
64 from . import (
64 from . import (
65 ancestor,
65 ancestor,
66 dagop,
66 dagop,
67 error,
67 error,
68 mdiff,
68 mdiff,
69 policy,
69 policy,
70 pycompat,
70 pycompat,
71 templatefilters,
71 templatefilters,
72 util,
72 util,
73 )
73 )
74 from .interfaces import (
74 from .interfaces import (
75 repository,
75 repository,
76 util as interfaceutil,
76 util as interfaceutil,
77 )
77 )
78 from .revlogutils import (
78 from .revlogutils import (
79 deltas as deltautil,
79 deltas as deltautil,
80 docket as docketutil,
80 docket as docketutil,
81 flagutil,
81 flagutil,
82 nodemap as nodemaputil,
82 nodemap as nodemaputil,
83 revlogv0,
83 revlogv0,
84 sidedata as sidedatautil,
84 sidedata as sidedatautil,
85 )
85 )
86 from .utils import (
86 from .utils import (
87 storageutil,
87 storageutil,
88 stringutil,
88 stringutil,
89 )
89 )
90
90
# blanked usage of all the names to prevent pyflakes constraints
# We need these names available in the module for extensions.
# (Bare-name expressions: they evaluate the imported constants so linters
# consider them used, while having no runtime effect.)

REVLOGV0
REVLOGV1
REVLOGV2
FLAG_INLINE_DATA
FLAG_GENERALDELTA
REVLOG_DEFAULT_FLAGS
REVLOG_DEFAULT_FORMAT
REVLOG_DEFAULT_VERSION
REVLOGV1_FLAGS
REVLOGV2_FLAGS
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
REVIDX_HASCOPIESINFO
REVIDX_EXTSTORED
REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
REVIDX_RAWTEXT_CHANGING_FLAGS
111
111
# Optional accelerated implementations: C parsers and Rust modules. Each
# import resolves to None when the corresponding extension is unavailable.
parsers = policy.importmod('parsers')
rustancestor = policy.importrust('ancestor')
rustdagop = policy.importrust('dagop')
rustrevlog = policy.importrust('revlog')

# Aliased for performance (avoids an attribute lookup on each call).
_zlibdecompress = zlib.decompress

# max size of revlog with inline data
_maxinline = 131072
# read/cache granularity for raw revlog data, in bytes
_chunksize = 1048576
123
123
# Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
    """Read-side flag processor for ellipsis revisions.

    Returns the text unchanged and ``False`` to signal that the raw text
    must not be used for hash verification.
    """
    return text, False
127
127
128
128
def ellipsiswriteprocessor(rl, text):
    """Write-side flag processor for ellipsis revisions.

    Returns the text unchanged and ``False`` to signal that the raw text
    must not be used for hash verification.
    """
    return text, False
131
131
132
132
def ellipsisrawprocessor(rl, text):
    """Raw flag processor for ellipsis revisions.

    Always returns ``False``: the rawtext of an ellipsis revision never
    validates against the recorded hash.
    """
    return False
135
135
136
136
# (read, write, raw) processor triple registered for REVIDX_ELLIPSIS.
ellipsisprocessor = (
    ellipsisreadprocessor,
    ellipsiswriteprocessor,
    ellipsisrawprocessor,
)
142
142
143
143
def offset_type(offset, type):
    """Pack a data offset and revision flags into one index field.

    The offset occupies the high bits (shifted left by 16); the low 16 bits
    carry the flags. Raises ValueError when ``type`` contains flag bits not
    listed in ``flagutil.REVIDX_KNOWN_FLAGS``.
    """
    if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
        raise ValueError(b'unknown revlog index flags')
    return int(int(offset) << 16 | type)
148
148
149
149
150 def _verify_revision(rl, skipflags, state, node):
150 def _verify_revision(rl, skipflags, state, node):
151 """Verify the integrity of the given revlog ``node`` while providing a hook
151 """Verify the integrity of the given revlog ``node`` while providing a hook
152 point for extensions to influence the operation."""
152 point for extensions to influence the operation."""
153 if skipflags:
153 if skipflags:
154 state[b'skipread'].add(node)
154 state[b'skipread'].add(node)
155 else:
155 else:
156 # Side-effect: read content and verify hash.
156 # Side-effect: read content and verify hash.
157 rl.revision(node)
157 rl.revision(node)
158
158
159
159
# True if a fast implementation for persistent-nodemap is available
#
# We also consider we have a "fast" implementation in "pure" python because
# people using pure don't really have performance consideration (and a
# wheelbarrow of other slowness source)
HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
    parsers, 'BaseIndexObject'
)
168
168
169
169
@attr.s(slots=True, frozen=True)
class _revisioninfo(object):
    """Information about a revision that allows building its fulltext
    node: expected hash of the revision
    p1, p2: parent revs of the revision
    btext: built text cache consisting of a one-element list
    cachedelta: (baserev, uncompressed_delta) or None
    flags: flags associated to the revision storage

    One of btext[0] or cachedelta must be set.
    """

    node = attr.ib()
    p1 = attr.ib()
    p2 = attr.ib()
    btext = attr.ib()
    # length of the fulltext, in bytes
    textlen = attr.ib()
    cachedelta = attr.ib()
    flags = attr.ib()
189
189
190
190
@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class revlogrevisiondelta(object):
    """A delta emitted by a revlog, satisfying ``irevisiondelta``."""

    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    sidedata = attr.ib()
    protocol_flags = attr.ib()
    linknode = attr.ib(default=None)
205
205
206
206
@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class revlogproblem(object):
    """A problem found while verifying a revlog (``iverifyproblem``)."""

    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)
213
213
214
214
def parse_index_v1(data, inline):
    """Parse a version-1 revlog index; returns ``(index, cache)``."""
    # call the C implementation to parse the index data
    index, cache = parsers.parse_index2(data, inline)
    return index, cache
219
219
220
220
def parse_index_v2(data, inline):
    """Parse a version-2 revlog index; returns ``(index, cache)``."""
    # call the C implementation to parse the index data
    index, cache = parsers.parse_index2(data, inline, revlogv2=True)
    return index, cache
225
225
226
226
if util.safehasattr(parsers, 'parse_index_devel_nodemap'):

    def parse_index_v1_nodemap(data, inline):
        """Parse a v1 index using the devel nodemap-aware C parser."""
        index, cache = parsers.parse_index_devel_nodemap(data, inline)
        return index, cache


else:
    # the C parser lacks nodemap support; callers must check for None
    parse_index_v1_nodemap = None
236
236
237
237
def parse_index_v1_mixed(data, inline):
    """Parse a v1 index and wrap it in the Rust ``MixedIndex``."""
    index, cache = parse_index_v1(data, inline)
    return rustrevlog.MixedIndex(index), cache
241
241
242
242
243 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
243 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
244 # signed integer)
244 # signed integer)
245 _maxentrysize = 0x7FFFFFFF
245 _maxentrysize = 0x7FFFFFFF
246
246
247
247
248 class revlog(object):
248 class revlog(object):
249 """
249 """
250 the underlying revision storage object
250 the underlying revision storage object
251
251
252 A revlog consists of two parts, an index and the revision data.
252 A revlog consists of two parts, an index and the revision data.
253
253
254 The index is a file with a fixed record size containing
254 The index is a file with a fixed record size containing
255 information on each revision, including its nodeid (hash), the
255 information on each revision, including its nodeid (hash), the
256 nodeids of its parents, the position and offset of its data within
256 nodeids of its parents, the position and offset of its data within
257 the data file, and the revision it's based on. Finally, each entry
257 the data file, and the revision it's based on. Finally, each entry
258 contains a linkrev entry that can serve as a pointer to external
258 contains a linkrev entry that can serve as a pointer to external
259 data.
259 data.
260
260
261 The revision data itself is a linear collection of data chunks.
261 The revision data itself is a linear collection of data chunks.
262 Each chunk represents a revision and is usually represented as a
262 Each chunk represents a revision and is usually represented as a
263 delta against the previous chunk. To bound lookup time, runs of
263 delta against the previous chunk. To bound lookup time, runs of
264 deltas are limited to about 2 times the length of the original
264 deltas are limited to about 2 times the length of the original
265 version data. This makes retrieval of a version proportional to
265 version data. This makes retrieval of a version proportional to
266 its size, or O(1) relative to the number of revisions.
266 its size, or O(1) relative to the number of revisions.
267
267
268 Both pieces of the revlog are written to in an append-only
268 Both pieces of the revlog are written to in an append-only
269 fashion, which means we never need to rewrite a file to insert or
269 fashion, which means we never need to rewrite a file to insert or
270 remove data, and can use some simple techniques to avoid the need
270 remove data, and can use some simple techniques to avoid the need
271 for locking while reading.
271 for locking while reading.
272
272
273 If checkambig, indexfile is opened with checkambig=True at
273 If checkambig, indexfile is opened with checkambig=True at
274 writing, to avoid file stat ambiguity.
274 writing, to avoid file stat ambiguity.
275
275
276 If mmaplargeindex is True, and an mmapindexthreshold is set, the
276 If mmaplargeindex is True, and an mmapindexthreshold is set, the
277 index will be mmapped rather than read if it is larger than the
277 index will be mmapped rather than read if it is larger than the
278 configured threshold.
278 configured threshold.
279
279
280 If censorable is True, the revlog can have censored revisions.
280 If censorable is True, the revlog can have censored revisions.
281
281
282 If `upperboundcomp` is not None, this is the expected maximal gain from
282 If `upperboundcomp` is not None, this is the expected maximal gain from
283 compression for the data content.
283 compression for the data content.
284
284
285 `concurrencychecker` is an optional function that receives 3 arguments: a
285 `concurrencychecker` is an optional function that receives 3 arguments: a
286 file handle, a filename, and an expected position. It should check whether
286 file handle, a filename, and an expected position. It should check whether
287 the current position in the file handle is valid, and log/warn/fail (by
287 the current position in the file handle is valid, and log/warn/fail (by
288 raising).
288 raising).
289
289
290
290
291 Internal details
291 Internal details
292 ----------------
292 ----------------
293
293
294 A large part of the revlog logic deals with revisions' "index entries", tuple
294 A large part of the revlog logic deals with revisions' "index entries", tuple
295 objects that contains the same "items" whatever the revlog version.
295 objects that contains the same "items" whatever the revlog version.
296 Different versions will have different ways of storing these items (sometimes
296 Different versions will have different ways of storing these items (sometimes
297 not having them at all), but the tuple will always be the same. New fields
297 not having them at all), but the tuple will always be the same. New fields
298 are usually added at the end to avoid breaking existing code that relies
298 are usually added at the end to avoid breaking existing code that relies
299 on the existing order. The field are defined as follows:
299 on the existing order. The field are defined as follows:
300
300
301 [0] offset:
301 [0] offset:
302 The byte index of the start of revision data chunk.
302 The byte index of the start of revision data chunk.
303 That value is shifted up by 16 bits. use "offset = field >> 16" to
303 That value is shifted up by 16 bits. use "offset = field >> 16" to
304 retrieve it.
304 retrieve it.
305
305
306 flags:
306 flags:
307 A flag field that carries special information or changes the behavior
307 A flag field that carries special information or changes the behavior
308 of the revision. (see `REVIDX_*` constants for details)
308 of the revision. (see `REVIDX_*` constants for details)
309 The flag field only occupies the first 16 bits of this field,
309 The flag field only occupies the first 16 bits of this field,
310 use "flags = field & 0xFFFF" to retrieve the value.
310 use "flags = field & 0xFFFF" to retrieve the value.
311
311
312 [1] compressed length:
312 [1] compressed length:
313 The size, in bytes, of the chunk on disk
313 The size, in bytes, of the chunk on disk
314
314
315 [2] uncompressed length:
315 [2] uncompressed length:
316 The size, in bytes, of the full revision once reconstructed.
316 The size, in bytes, of the full revision once reconstructed.
317
317
318 [3] base rev:
318 [3] base rev:
319 Either the base of the revision delta chain (without general
319 Either the base of the revision delta chain (without general
320 delta), or the base of the delta (stored in the data chunk)
320 delta), or the base of the delta (stored in the data chunk)
321 with general delta.
321 with general delta.
322
322
323 [4] link rev:
323 [4] link rev:
324 Changelog revision number of the changeset introducing this
324 Changelog revision number of the changeset introducing this
325 revision.
325 revision.
326
326
327 [5] parent 1 rev:
327 [5] parent 1 rev:
328 Revision number of the first parent
328 Revision number of the first parent
329
329
330 [6] parent 2 rev:
330 [6] parent 2 rev:
331 Revision number of the second parent
331 Revision number of the second parent
332
332
333 [7] node id:
333 [7] node id:
334 The node id of the current revision
334 The node id of the current revision
335
335
336 [8] sidedata offset:
336 [8] sidedata offset:
337 The byte index of the start of the revision's side-data chunk.
337 The byte index of the start of the revision's side-data chunk.
338
338
339 [9] sidedata chunk length:
339 [9] sidedata chunk length:
340 The size, in bytes, of the revision's side-data chunk.
340 The size, in bytes, of the revision's side-data chunk.
341
341
342 [10] data compression mode:
342 [10] data compression mode:
343 two bits that detail the way the data chunk is compressed on disk.
343 two bits that detail the way the data chunk is compressed on disk.
344 (see "COMP_MODE_*" constants for details). For revlog version 0 and
344 (see "COMP_MODE_*" constants for details). For revlog version 0 and
345 1 this will always be COMP_MODE_INLINE.
345 1 this will always be COMP_MODE_INLINE.
346
346
347 """
347 """
348
348
349 _flagserrorclass = error.RevlogError
349 _flagserrorclass = error.RevlogError
350
350
351 def __init__(
351 def __init__(
352 self,
352 self,
353 opener,
353 opener,
354 target,
354 target,
355 radix,
355 radix,
356 postfix=None, # only exist for `tmpcensored` now
356 postfix=None, # only exist for `tmpcensored` now
357 checkambig=False,
357 checkambig=False,
358 mmaplargeindex=False,
358 mmaplargeindex=False,
359 censorable=False,
359 censorable=False,
360 upperboundcomp=None,
360 upperboundcomp=None,
361 persistentnodemap=False,
361 persistentnodemap=False,
362 concurrencychecker=None,
362 concurrencychecker=None,
363 trypending=False,
363 trypending=False,
364 ):
364 ):
365 """
365 """
366 create a revlog object
366 create a revlog object
367
367
368 opener is a function that abstracts the file opening operation
368 opener is a function that abstracts the file opening operation
369 and can be used to implement COW semantics or the like.
369 and can be used to implement COW semantics or the like.
370
370
371 `target`: a (KIND, ID) tuple that identify the content stored in
371 `target`: a (KIND, ID) tuple that identify the content stored in
372 this revlog. It help the rest of the code to understand what the revlog
372 this revlog. It help the rest of the code to understand what the revlog
373 is about without having to resort to heuristic and index filename
373 is about without having to resort to heuristic and index filename
374 analysis. Note: that this must be reliably be set by normal code, but
374 analysis. Note: that this must be reliably be set by normal code, but
375 that test, debug, or performance measurement code might not set this to
375 that test, debug, or performance measurement code might not set this to
376 accurate value.
376 accurate value.
377 """
377 """
378 self.upperboundcomp = upperboundcomp
378 self.upperboundcomp = upperboundcomp
379
379
380 self.radix = radix
380 self.radix = radix
381
381
382 self._docket_file = None
382 self._docket_file = None
383 self._indexfile = None
383 self._indexfile = None
384 self._datafile = None
384 self._datafile = None
385 self._nodemap_file = None
385 self._nodemap_file = None
386 self.postfix = postfix
386 self.postfix = postfix
387 self._trypending = trypending
387 self._trypending = trypending
388 self.opener = opener
388 self.opener = opener
389 if persistentnodemap:
389 if persistentnodemap:
390 self._nodemap_file = nodemaputil.get_nodemap_file(self)
390 self._nodemap_file = nodemaputil.get_nodemap_file(self)
391
391
392 assert target[0] in ALL_KINDS
392 assert target[0] in ALL_KINDS
393 assert len(target) == 2
393 assert len(target) == 2
394 self.target = target
394 self.target = target
395 # When True, indexfile is opened with checkambig=True at writing, to
395 # When True, indexfile is opened with checkambig=True at writing, to
396 # avoid file stat ambiguity.
396 # avoid file stat ambiguity.
397 self._checkambig = checkambig
397 self._checkambig = checkambig
398 self._mmaplargeindex = mmaplargeindex
398 self._mmaplargeindex = mmaplargeindex
399 self._censorable = censorable
399 self._censorable = censorable
400 # 3-tuple of (node, rev, text) for a raw revision.
400 # 3-tuple of (node, rev, text) for a raw revision.
401 self._revisioncache = None
401 self._revisioncache = None
402 # Maps rev to chain base rev.
402 # Maps rev to chain base rev.
403 self._chainbasecache = util.lrucachedict(100)
403 self._chainbasecache = util.lrucachedict(100)
404 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
404 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
405 self._chunkcache = (0, b'')
405 self._chunkcache = (0, b'')
406 # How much data to read and cache into the raw revlog data cache.
406 # How much data to read and cache into the raw revlog data cache.
407 self._chunkcachesize = 65536
407 self._chunkcachesize = 65536
408 self._maxchainlen = None
408 self._maxchainlen = None
409 self._deltabothparents = True
409 self._deltabothparents = True
410 self.index = None
410 self.index = None
411 self._docket = None
411 self._docket = None
412 self._nodemap_docket = None
412 self._nodemap_docket = None
413 # Mapping of partial identifiers to full nodes.
413 # Mapping of partial identifiers to full nodes.
414 self._pcache = {}
414 self._pcache = {}
415 # Mapping of revision integer to full node.
415 # Mapping of revision integer to full node.
416 self._compengine = b'zlib'
416 self._compengine = b'zlib'
417 self._compengineopts = {}
417 self._compengineopts = {}
418 self._maxdeltachainspan = -1
418 self._maxdeltachainspan = -1
419 self._withsparseread = False
419 self._withsparseread = False
420 self._sparserevlog = False
420 self._sparserevlog = False
421 self.hassidedata = False
421 self.hassidedata = False
422 self._srdensitythreshold = 0.50
422 self._srdensitythreshold = 0.50
423 self._srmingapsize = 262144
423 self._srmingapsize = 262144
424
424
425 # Make copy of flag processors so each revlog instance can support
425 # Make copy of flag processors so each revlog instance can support
426 # custom flags.
426 # custom flags.
427 self._flagprocessors = dict(flagutil.flagprocessors)
427 self._flagprocessors = dict(flagutil.flagprocessors)
428
428
429 # 2-tuple of file handles being used for active writing.
429 # 2-tuple of file handles being used for active writing.
430 self._writinghandles = None
430 self._writinghandles = None
431 # prevent nesting of addgroup
431 # prevent nesting of addgroup
432 self._adding_group = None
432 self._adding_group = None
433
433
434 self._loadindex()
434 self._loadindex()
435
435
436 self._concurrencychecker = concurrencychecker
436 self._concurrencychecker = concurrencychecker
437
437
438 def _init_opts(self):
438 def _init_opts(self):
439 """process options (from above/config) to setup associated default revlog mode
439 """process options (from above/config) to setup associated default revlog mode
440
440
441 These values might be affected when actually reading on disk information.
441 These values might be affected when actually reading on disk information.
442
442
443 The relevant values are returned for use in _loadindex().
443 The relevant values are returned for use in _loadindex().
444
444
445 * newversionflags:
445 * newversionflags:
446 version header to use if we need to create a new revlog
446 version header to use if we need to create a new revlog
447
447
448 * mmapindexthreshold:
448 * mmapindexthreshold:
449 minimal index size for start to use mmap
449 minimal index size for start to use mmap
450
450
451 * force_nodemap:
451 * force_nodemap:
452 force the usage of a "development" version of the nodemap code
452 force the usage of a "development" version of the nodemap code
453 """
453 """
454 mmapindexthreshold = None
454 mmapindexthreshold = None
455 opts = self.opener.options
455 opts = self.opener.options
456
456
457 if b'revlogv2' in opts:
457 if b'revlogv2' in opts:
458 new_header = REVLOGV2 | FLAG_INLINE_DATA
458 new_header = REVLOGV2 | FLAG_INLINE_DATA
459 elif b'revlogv1' in opts:
459 elif b'revlogv1' in opts:
460 new_header = REVLOGV1 | FLAG_INLINE_DATA
460 new_header = REVLOGV1 | FLAG_INLINE_DATA
461 if b'generaldelta' in opts:
461 if b'generaldelta' in opts:
462 new_header |= FLAG_GENERALDELTA
462 new_header |= FLAG_GENERALDELTA
463 elif b'revlogv0' in self.opener.options:
463 elif b'revlogv0' in self.opener.options:
464 new_header = REVLOGV0
464 new_header = REVLOGV0
465 else:
465 else:
466 new_header = REVLOG_DEFAULT_VERSION
466 new_header = REVLOG_DEFAULT_VERSION
467
467
468 if b'chunkcachesize' in opts:
468 if b'chunkcachesize' in opts:
469 self._chunkcachesize = opts[b'chunkcachesize']
469 self._chunkcachesize = opts[b'chunkcachesize']
470 if b'maxchainlen' in opts:
470 if b'maxchainlen' in opts:
471 self._maxchainlen = opts[b'maxchainlen']
471 self._maxchainlen = opts[b'maxchainlen']
472 if b'deltabothparents' in opts:
472 if b'deltabothparents' in opts:
473 self._deltabothparents = opts[b'deltabothparents']
473 self._deltabothparents = opts[b'deltabothparents']
474 self._lazydelta = bool(opts.get(b'lazydelta', True))
474 self._lazydelta = bool(opts.get(b'lazydelta', True))
475 self._lazydeltabase = False
475 self._lazydeltabase = False
476 if self._lazydelta:
476 if self._lazydelta:
477 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
477 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
478 if b'compengine' in opts:
478 if b'compengine' in opts:
479 self._compengine = opts[b'compengine']
479 self._compengine = opts[b'compengine']
480 if b'zlib.level' in opts:
480 if b'zlib.level' in opts:
481 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
481 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
482 if b'zstd.level' in opts:
482 if b'zstd.level' in opts:
483 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
483 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
484 if b'maxdeltachainspan' in opts:
484 if b'maxdeltachainspan' in opts:
485 self._maxdeltachainspan = opts[b'maxdeltachainspan']
485 self._maxdeltachainspan = opts[b'maxdeltachainspan']
486 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
486 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
487 mmapindexthreshold = opts[b'mmapindexthreshold']
487 mmapindexthreshold = opts[b'mmapindexthreshold']
488 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
488 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
489 withsparseread = bool(opts.get(b'with-sparse-read', False))
489 withsparseread = bool(opts.get(b'with-sparse-read', False))
490 # sparse-revlog forces sparse-read
490 # sparse-revlog forces sparse-read
491 self._withsparseread = self._sparserevlog or withsparseread
491 self._withsparseread = self._sparserevlog or withsparseread
492 if b'sparse-read-density-threshold' in opts:
492 if b'sparse-read-density-threshold' in opts:
493 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
493 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
494 if b'sparse-read-min-gap-size' in opts:
494 if b'sparse-read-min-gap-size' in opts:
495 self._srmingapsize = opts[b'sparse-read-min-gap-size']
495 self._srmingapsize = opts[b'sparse-read-min-gap-size']
496 if opts.get(b'enableellipsis'):
496 if opts.get(b'enableellipsis'):
497 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
497 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
498
498
499 # revlog v0 doesn't have flag processors
499 # revlog v0 doesn't have flag processors
500 for flag, processor in pycompat.iteritems(
500 for flag, processor in pycompat.iteritems(
501 opts.get(b'flagprocessors', {})
501 opts.get(b'flagprocessors', {})
502 ):
502 ):
503 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
503 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
504
504
505 if self._chunkcachesize <= 0:
505 if self._chunkcachesize <= 0:
506 raise error.RevlogError(
506 raise error.RevlogError(
507 _(b'revlog chunk cache size %r is not greater than 0')
507 _(b'revlog chunk cache size %r is not greater than 0')
508 % self._chunkcachesize
508 % self._chunkcachesize
509 )
509 )
510 elif self._chunkcachesize & (self._chunkcachesize - 1):
510 elif self._chunkcachesize & (self._chunkcachesize - 1):
511 raise error.RevlogError(
511 raise error.RevlogError(
512 _(b'revlog chunk cache size %r is not a power of 2')
512 _(b'revlog chunk cache size %r is not a power of 2')
513 % self._chunkcachesize
513 % self._chunkcachesize
514 )
514 )
515 force_nodemap = opts.get(b'devel-force-nodemap', False)
515 force_nodemap = opts.get(b'devel-force-nodemap', False)
516 return new_header, mmapindexthreshold, force_nodemap
516 return new_header, mmapindexthreshold, force_nodemap
517
517
518 def _get_data(self, filepath, mmap_threshold, size=None):
518 def _get_data(self, filepath, mmap_threshold, size=None):
519 """return a file content with or without mmap
519 """return a file content with or without mmap
520
520
521 If the file is missing return the empty string"""
521 If the file is missing return the empty string"""
522 try:
522 try:
523 with self.opener(filepath) as fp:
523 with self.opener(filepath) as fp:
524 if mmap_threshold is not None:
524 if mmap_threshold is not None:
525 file_size = self.opener.fstat(fp).st_size
525 file_size = self.opener.fstat(fp).st_size
526 if file_size >= mmap_threshold:
526 if file_size >= mmap_threshold:
527 if size is not None:
527 if size is not None:
528 # avoid potentiel mmap crash
528 # avoid potentiel mmap crash
529 size = min(file_size, size)
529 size = min(file_size, size)
530 # TODO: should .close() to release resources without
530 # TODO: should .close() to release resources without
531 # relying on Python GC
531 # relying on Python GC
532 if size is None:
532 if size is None:
533 return util.buffer(util.mmapread(fp))
533 return util.buffer(util.mmapread(fp))
534 else:
534 else:
535 return util.buffer(util.mmapread(fp, size))
535 return util.buffer(util.mmapread(fp, size))
536 if size is None:
536 if size is None:
537 return fp.read()
537 return fp.read()
538 else:
538 else:
539 return fp.read(size)
539 return fp.read(size)
540 except IOError as inst:
540 except IOError as inst:
541 if inst.errno != errno.ENOENT:
541 if inst.errno != errno.ENOENT:
542 raise
542 raise
543 return b''
543 return b''
544
544
545 def _loadindex(self):
545 def _loadindex(self):
546
546
547 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
547 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
548
548
549 if self.postfix is not None:
549 if self.postfix is not None:
550 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
550 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
551 elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
551 elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
552 entry_point = b'%s.i.a' % self.radix
552 entry_point = b'%s.i.a' % self.radix
553 else:
553 else:
554 entry_point = b'%s.i' % self.radix
554 entry_point = b'%s.i' % self.radix
555
555
556 entry_data = b''
556 entry_data = b''
557 self._initempty = True
557 self._initempty = True
558 entry_data = self._get_data(entry_point, mmapindexthreshold)
558 entry_data = self._get_data(entry_point, mmapindexthreshold)
559 if len(entry_data) > 0:
559 if len(entry_data) > 0:
560 header = INDEX_HEADER.unpack(entry_data[:4])[0]
560 header = INDEX_HEADER.unpack(entry_data[:4])[0]
561 self._initempty = False
561 self._initempty = False
562 else:
562 else:
563 header = new_header
563 header = new_header
564
564
565 self._format_flags = header & ~0xFFFF
565 self._format_flags = header & ~0xFFFF
566 self._format_version = header & 0xFFFF
566 self._format_version = header & 0xFFFF
567
567
568 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
568 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
569 if supported_flags is None:
569 if supported_flags is None:
570 msg = _(b'unknown version (%d) in revlog %s')
570 msg = _(b'unknown version (%d) in revlog %s')
571 msg %= (self._format_version, self.display_id)
571 msg %= (self._format_version, self.display_id)
572 raise error.RevlogError(msg)
572 raise error.RevlogError(msg)
573 elif self._format_flags & ~supported_flags:
573 elif self._format_flags & ~supported_flags:
574 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
574 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
575 display_flag = self._format_flags >> 16
575 display_flag = self._format_flags >> 16
576 msg %= (display_flag, self._format_version, self.display_id)
576 msg %= (display_flag, self._format_version, self.display_id)
577 raise error.RevlogError(msg)
577 raise error.RevlogError(msg)
578
578
579 features = FEATURES_BY_VERSION[self._format_version]
579 features = FEATURES_BY_VERSION[self._format_version]
580 self._inline = features[b'inline'](self._format_flags)
580 self._inline = features[b'inline'](self._format_flags)
581 self._generaldelta = features[b'generaldelta'](self._format_flags)
581 self._generaldelta = features[b'generaldelta'](self._format_flags)
582 self.hassidedata = features[b'sidedata']
582 self.hassidedata = features[b'sidedata']
583
583
584 if not features[b'docket']:
584 if not features[b'docket']:
585 self._indexfile = entry_point
585 self._indexfile = entry_point
586 index_data = entry_data
586 index_data = entry_data
587 else:
587 else:
588 self._docket_file = entry_point
588 self._docket_file = entry_point
589 if self._initempty:
589 if self._initempty:
590 self._docket = docketutil.default_docket(self, header)
590 self._docket = docketutil.default_docket(self, header)
591 else:
591 else:
592 self._docket = docketutil.parse_docket(
592 self._docket = docketutil.parse_docket(
593 self, entry_data, use_pending=self._trypending
593 self, entry_data, use_pending=self._trypending
594 )
594 )
595 self._indexfile = self._docket.index_filepath()
595 self._indexfile = self._docket.index_filepath()
596 index_data = b''
596 index_data = b''
597 index_size = self._docket.index_end
597 index_size = self._docket.index_end
598 if index_size > 0:
598 if index_size > 0:
599 index_data = self._get_data(
599 index_data = self._get_data(
600 self._indexfile, mmapindexthreshold, size=index_size
600 self._indexfile, mmapindexthreshold, size=index_size
601 )
601 )
602 if len(index_data) < index_size:
602 if len(index_data) < index_size:
603 msg = _(b'too few index data for %s: got %d, expected %d')
603 msg = _(b'too few index data for %s: got %d, expected %d')
604 msg %= (self.display_id, len(index_data), index_size)
604 msg %= (self.display_id, len(index_data), index_size)
605 raise error.RevlogError(msg)
605 raise error.RevlogError(msg)
606
606
607 self._inline = False
607 self._inline = False
608 # generaldelta implied by version 2 revlogs.
608 # generaldelta implied by version 2 revlogs.
609 self._generaldelta = True
609 self._generaldelta = True
610 # the logic for persistent nodemap will be dealt with within the
610 # the logic for persistent nodemap will be dealt with within the
611 # main docket, so disable it for now.
611 # main docket, so disable it for now.
612 self._nodemap_file = None
612 self._nodemap_file = None
613
613
614 if self.postfix is None:
614 if self.postfix is None:
615 self._datafile = b'%s.d' % self.radix
615 self._datafile = b'%s.d' % self.radix
616 else:
616 else:
617 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
617 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
618
618
619 self.nodeconstants = sha1nodeconstants
619 self.nodeconstants = sha1nodeconstants
620 self.nullid = self.nodeconstants.nullid
620 self.nullid = self.nodeconstants.nullid
621
621
622 # sparse-revlog can't be on without general-delta (issue6056)
622 # sparse-revlog can't be on without general-delta (issue6056)
623 if not self._generaldelta:
623 if not self._generaldelta:
624 self._sparserevlog = False
624 self._sparserevlog = False
625
625
626 self._storedeltachains = True
626 self._storedeltachains = True
627
627
628 devel_nodemap = (
628 devel_nodemap = (
629 self._nodemap_file
629 self._nodemap_file
630 and force_nodemap
630 and force_nodemap
631 and parse_index_v1_nodemap is not None
631 and parse_index_v1_nodemap is not None
632 )
632 )
633
633
634 use_rust_index = False
634 use_rust_index = False
635 if rustrevlog is not None:
635 if rustrevlog is not None:
636 if self._nodemap_file is not None:
636 if self._nodemap_file is not None:
637 use_rust_index = True
637 use_rust_index = True
638 else:
638 else:
639 use_rust_index = self.opener.options.get(b'rust.index')
639 use_rust_index = self.opener.options.get(b'rust.index')
640
640
641 self._parse_index = parse_index_v1
641 self._parse_index = parse_index_v1
642 if self._format_version == REVLOGV0:
642 if self._format_version == REVLOGV0:
643 self._parse_index = revlogv0.parse_index_v0
643 self._parse_index = revlogv0.parse_index_v0
644 elif self._format_version == REVLOGV2:
644 elif self._format_version == REVLOGV2:
645 self._parse_index = parse_index_v2
645 self._parse_index = parse_index_v2
646 elif devel_nodemap:
646 elif devel_nodemap:
647 self._parse_index = parse_index_v1_nodemap
647 self._parse_index = parse_index_v1_nodemap
648 elif use_rust_index:
648 elif use_rust_index:
649 self._parse_index = parse_index_v1_mixed
649 self._parse_index = parse_index_v1_mixed
650 try:
650 try:
651 d = self._parse_index(index_data, self._inline)
651 d = self._parse_index(index_data, self._inline)
652 index, _chunkcache = d
652 index, _chunkcache = d
653 use_nodemap = (
653 use_nodemap = (
654 not self._inline
654 not self._inline
655 and self._nodemap_file is not None
655 and self._nodemap_file is not None
656 and util.safehasattr(index, 'update_nodemap_data')
656 and util.safehasattr(index, 'update_nodemap_data')
657 )
657 )
658 if use_nodemap:
658 if use_nodemap:
659 nodemap_data = nodemaputil.persisted_data(self)
659 nodemap_data = nodemaputil.persisted_data(self)
660 if nodemap_data is not None:
660 if nodemap_data is not None:
661 docket = nodemap_data[0]
661 docket = nodemap_data[0]
662 if (
662 if (
663 len(d[0]) > docket.tip_rev
663 len(d[0]) > docket.tip_rev
664 and d[0][docket.tip_rev][7] == docket.tip_node
664 and d[0][docket.tip_rev][7] == docket.tip_node
665 ):
665 ):
666 # no changelog tampering
666 # no changelog tampering
667 self._nodemap_docket = docket
667 self._nodemap_docket = docket
668 index.update_nodemap_data(*nodemap_data)
668 index.update_nodemap_data(*nodemap_data)
669 except (ValueError, IndexError):
669 except (ValueError, IndexError):
670 raise error.RevlogError(
670 raise error.RevlogError(
671 _(b"index %s is corrupted") % self.display_id
671 _(b"index %s is corrupted") % self.display_id
672 )
672 )
673 self.index, self._chunkcache = d
673 self.index, self._chunkcache = d
674 if not self._chunkcache:
674 if not self._chunkcache:
675 self._chunkclear()
675 self._chunkclear()
676 # revnum -> (chain-length, sum-delta-length)
676 # revnum -> (chain-length, sum-delta-length)
677 self._chaininfocache = util.lrucachedict(500)
677 self._chaininfocache = util.lrucachedict(500)
678 # revlog header -> revlog compressor
678 # revlog header -> revlog compressor
679 self._decompressors = {}
679 self._decompressors = {}
680
680
681 @util.propertycache
681 @util.propertycache
682 def revlog_kind(self):
682 def revlog_kind(self):
683 return self.target[0]
683 return self.target[0]
684
684
685 @util.propertycache
685 @util.propertycache
686 def display_id(self):
686 def display_id(self):
687 """The public facing "ID" of the revlog that we use in message"""
687 """The public facing "ID" of the revlog that we use in message"""
688 # Maybe we should build a user facing representation of
688 # Maybe we should build a user facing representation of
689 # revlog.target instead of using `self.radix`
689 # revlog.target instead of using `self.radix`
690 return self.radix
690 return self.radix
691
691
692 def _get_decompressor(self, t):
693 try:
694 compressor = self._decompressors[t]
695 except KeyError:
696 try:
697 engine = util.compengines.forrevlogheader(t)
698 compressor = engine.revlogcompressor(self._compengineopts)
699 self._decompressors[t] = compressor
700 except KeyError:
701 raise error.RevlogError(
702 _(b'unknown compression type %s') % binascii.hexlify(t)
703 )
704 return compressor
705
692 @util.propertycache
706 @util.propertycache
693 def _compressor(self):
707 def _compressor(self):
694 engine = util.compengines[self._compengine]
708 engine = util.compengines[self._compengine]
695 return engine.revlogcompressor(self._compengineopts)
709 return engine.revlogcompressor(self._compengineopts)
696
710
697 def _indexfp(self):
711 def _indexfp(self):
698 """file object for the revlog's index file"""
712 """file object for the revlog's index file"""
699 return self.opener(self._indexfile, mode=b"r")
713 return self.opener(self._indexfile, mode=b"r")
700
714
701 def __index_write_fp(self):
715 def __index_write_fp(self):
702 # You should not use this directly and use `_writing` instead
716 # You should not use this directly and use `_writing` instead
703 try:
717 try:
704 f = self.opener(
718 f = self.opener(
705 self._indexfile, mode=b"r+", checkambig=self._checkambig
719 self._indexfile, mode=b"r+", checkambig=self._checkambig
706 )
720 )
707 if self._docket is None:
721 if self._docket is None:
708 f.seek(0, os.SEEK_END)
722 f.seek(0, os.SEEK_END)
709 else:
723 else:
710 f.seek(self._docket.index_end, os.SEEK_SET)
724 f.seek(self._docket.index_end, os.SEEK_SET)
711 return f
725 return f
712 except IOError as inst:
726 except IOError as inst:
713 if inst.errno != errno.ENOENT:
727 if inst.errno != errno.ENOENT:
714 raise
728 raise
715 return self.opener(
729 return self.opener(
716 self._indexfile, mode=b"w+", checkambig=self._checkambig
730 self._indexfile, mode=b"w+", checkambig=self._checkambig
717 )
731 )
718
732
719 def __index_new_fp(self):
733 def __index_new_fp(self):
720 # You should not use this unless you are upgrading from inline revlog
734 # You should not use this unless you are upgrading from inline revlog
721 return self.opener(
735 return self.opener(
722 self._indexfile,
736 self._indexfile,
723 mode=b"w",
737 mode=b"w",
724 checkambig=self._checkambig,
738 checkambig=self._checkambig,
725 atomictemp=True,
739 atomictemp=True,
726 )
740 )
727
741
728 def _datafp(self, mode=b'r'):
742 def _datafp(self, mode=b'r'):
729 """file object for the revlog's data file"""
743 """file object for the revlog's data file"""
730 return self.opener(self._datafile, mode=mode)
744 return self.opener(self._datafile, mode=mode)
731
745
732 @contextlib.contextmanager
746 @contextlib.contextmanager
733 def _datareadfp(self, existingfp=None):
747 def _datareadfp(self, existingfp=None):
734 """file object suitable to read data"""
748 """file object suitable to read data"""
735 # Use explicit file handle, if given.
749 # Use explicit file handle, if given.
736 if existingfp is not None:
750 if existingfp is not None:
737 yield existingfp
751 yield existingfp
738
752
739 # Use a file handle being actively used for writes, if available.
753 # Use a file handle being actively used for writes, if available.
740 # There is some danger to doing this because reads will seek the
754 # There is some danger to doing this because reads will seek the
741 # file. However, _writeentry() performs a SEEK_END before all writes,
755 # file. However, _writeentry() performs a SEEK_END before all writes,
742 # so we should be safe.
756 # so we should be safe.
743 elif self._writinghandles:
757 elif self._writinghandles:
744 if self._inline:
758 if self._inline:
745 yield self._writinghandles[0]
759 yield self._writinghandles[0]
746 else:
760 else:
747 yield self._writinghandles[1]
761 yield self._writinghandles[1]
748
762
749 # Otherwise open a new file handle.
763 # Otherwise open a new file handle.
750 else:
764 else:
751 if self._inline:
765 if self._inline:
752 func = self._indexfp
766 func = self._indexfp
753 else:
767 else:
754 func = self._datafp
768 func = self._datafp
755 with func() as fp:
769 with func() as fp:
756 yield fp
770 yield fp
757
771
758 def tiprev(self):
772 def tiprev(self):
759 return len(self.index) - 1
773 return len(self.index) - 1
760
774
761 def tip(self):
775 def tip(self):
762 return self.node(self.tiprev())
776 return self.node(self.tiprev())
763
777
764 def __contains__(self, rev):
778 def __contains__(self, rev):
765 return 0 <= rev < len(self)
779 return 0 <= rev < len(self)
766
780
767 def __len__(self):
781 def __len__(self):
768 return len(self.index)
782 return len(self.index)
769
783
770 def __iter__(self):
784 def __iter__(self):
771 return iter(pycompat.xrange(len(self)))
785 return iter(pycompat.xrange(len(self)))
772
786
773 def revs(self, start=0, stop=None):
787 def revs(self, start=0, stop=None):
774 """iterate over all rev in this revlog (from start to stop)"""
788 """iterate over all rev in this revlog (from start to stop)"""
775 return storageutil.iterrevs(len(self), start=start, stop=stop)
789 return storageutil.iterrevs(len(self), start=start, stop=stop)
776
790
777 @property
791 @property
778 def nodemap(self):
792 def nodemap(self):
779 msg = (
793 msg = (
780 b"revlog.nodemap is deprecated, "
794 b"revlog.nodemap is deprecated, "
781 b"use revlog.index.[has_node|rev|get_rev]"
795 b"use revlog.index.[has_node|rev|get_rev]"
782 )
796 )
783 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
797 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
784 return self.index.nodemap
798 return self.index.nodemap
785
799
786 @property
800 @property
787 def _nodecache(self):
801 def _nodecache(self):
788 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
802 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
789 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
803 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
790 return self.index.nodemap
804 return self.index.nodemap
791
805
792 def hasnode(self, node):
806 def hasnode(self, node):
793 try:
807 try:
794 self.rev(node)
808 self.rev(node)
795 return True
809 return True
796 except KeyError:
810 except KeyError:
797 return False
811 return False
798
812
799 def candelta(self, baserev, rev):
813 def candelta(self, baserev, rev):
800 """whether two revisions (baserev, rev) can be delta-ed or not"""
814 """whether two revisions (baserev, rev) can be delta-ed or not"""
801 # Disable delta if either rev requires a content-changing flag
815 # Disable delta if either rev requires a content-changing flag
802 # processor (ex. LFS). This is because such flag processor can alter
816 # processor (ex. LFS). This is because such flag processor can alter
803 # the rawtext content that the delta will be based on, and two clients
817 # the rawtext content that the delta will be based on, and two clients
804 # could have a same revlog node with different flags (i.e. different
818 # could have a same revlog node with different flags (i.e. different
805 # rawtext contents) and the delta could be incompatible.
819 # rawtext contents) and the delta could be incompatible.
806 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
820 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
807 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
821 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
808 ):
822 ):
809 return False
823 return False
810 return True
824 return True
811
825
812 def update_caches(self, transaction):
826 def update_caches(self, transaction):
813 if self._nodemap_file is not None:
827 if self._nodemap_file is not None:
814 if transaction is None:
828 if transaction is None:
815 nodemaputil.update_persistent_nodemap(self)
829 nodemaputil.update_persistent_nodemap(self)
816 else:
830 else:
817 nodemaputil.setup_persistent_nodemap(transaction, self)
831 nodemaputil.setup_persistent_nodemap(transaction, self)
818
832
819 def clearcaches(self):
833 def clearcaches(self):
820 self._revisioncache = None
834 self._revisioncache = None
821 self._chainbasecache.clear()
835 self._chainbasecache.clear()
822 self._chunkcache = (0, b'')
836 self._chunkcache = (0, b'')
823 self._pcache = {}
837 self._pcache = {}
824 self._nodemap_docket = None
838 self._nodemap_docket = None
825 self.index.clearcaches()
839 self.index.clearcaches()
826 # The python code is the one responsible for validating the docket, we
840 # The python code is the one responsible for validating the docket, we
827 # end up having to refresh it here.
841 # end up having to refresh it here.
828 use_nodemap = (
842 use_nodemap = (
829 not self._inline
843 not self._inline
830 and self._nodemap_file is not None
844 and self._nodemap_file is not None
831 and util.safehasattr(self.index, 'update_nodemap_data')
845 and util.safehasattr(self.index, 'update_nodemap_data')
832 )
846 )
833 if use_nodemap:
847 if use_nodemap:
834 nodemap_data = nodemaputil.persisted_data(self)
848 nodemap_data = nodemaputil.persisted_data(self)
835 if nodemap_data is not None:
849 if nodemap_data is not None:
836 self._nodemap_docket = nodemap_data[0]
850 self._nodemap_docket = nodemap_data[0]
837 self.index.update_nodemap_data(*nodemap_data)
851 self.index.update_nodemap_data(*nodemap_data)
838
852
839 def rev(self, node):
853 def rev(self, node):
840 try:
854 try:
841 return self.index.rev(node)
855 return self.index.rev(node)
842 except TypeError:
856 except TypeError:
843 raise
857 raise
844 except error.RevlogError:
858 except error.RevlogError:
845 # parsers.c radix tree lookup failed
859 # parsers.c radix tree lookup failed
846 if (
860 if (
847 node == self.nodeconstants.wdirid
861 node == self.nodeconstants.wdirid
848 or node in self.nodeconstants.wdirfilenodeids
862 or node in self.nodeconstants.wdirfilenodeids
849 ):
863 ):
850 raise error.WdirUnsupported
864 raise error.WdirUnsupported
851 raise error.LookupError(node, self.display_id, _(b'no node'))
865 raise error.LookupError(node, self.display_id, _(b'no node'))
852
866
853 # Accessors for index entries.
867 # Accessors for index entries.
854
868
855 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
869 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
856 # are flags.
870 # are flags.
857 def start(self, rev):
871 def start(self, rev):
858 return int(self.index[rev][0] >> 16)
872 return int(self.index[rev][0] >> 16)
859
873
860 def flags(self, rev):
874 def flags(self, rev):
861 return self.index[rev][0] & 0xFFFF
875 return self.index[rev][0] & 0xFFFF
862
876
863 def length(self, rev):
877 def length(self, rev):
864 return self.index[rev][1]
878 return self.index[rev][1]
865
879
866 def sidedata_length(self, rev):
880 def sidedata_length(self, rev):
867 if not self.hassidedata:
881 if not self.hassidedata:
868 return 0
882 return 0
869 return self.index[rev][9]
883 return self.index[rev][9]
870
884
871 def rawsize(self, rev):
885 def rawsize(self, rev):
872 """return the length of the uncompressed text for a given revision"""
886 """return the length of the uncompressed text for a given revision"""
873 l = self.index[rev][2]
887 l = self.index[rev][2]
874 if l >= 0:
888 if l >= 0:
875 return l
889 return l
876
890
877 t = self.rawdata(rev)
891 t = self.rawdata(rev)
878 return len(t)
892 return len(t)
879
893
880 def size(self, rev):
894 def size(self, rev):
881 """length of non-raw text (processed by a "read" flag processor)"""
895 """length of non-raw text (processed by a "read" flag processor)"""
882 # fast path: if no "read" flag processor could change the content,
896 # fast path: if no "read" flag processor could change the content,
883 # size is rawsize. note: ELLIPSIS is known to not change the content.
897 # size is rawsize. note: ELLIPSIS is known to not change the content.
884 flags = self.flags(rev)
898 flags = self.flags(rev)
885 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
899 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
886 return self.rawsize(rev)
900 return self.rawsize(rev)
887
901
888 return len(self.revision(rev, raw=False))
902 return len(self.revision(rev, raw=False))
889
903
890 def chainbase(self, rev):
904 def chainbase(self, rev):
891 base = self._chainbasecache.get(rev)
905 base = self._chainbasecache.get(rev)
892 if base is not None:
906 if base is not None:
893 return base
907 return base
894
908
895 index = self.index
909 index = self.index
896 iterrev = rev
910 iterrev = rev
897 base = index[iterrev][3]
911 base = index[iterrev][3]
898 while base != iterrev:
912 while base != iterrev:
899 iterrev = base
913 iterrev = base
900 base = index[iterrev][3]
914 base = index[iterrev][3]
901
915
902 self._chainbasecache[rev] = base
916 self._chainbasecache[rev] = base
903 return base
917 return base
904
918
905 def linkrev(self, rev):
919 def linkrev(self, rev):
906 return self.index[rev][4]
920 return self.index[rev][4]
907
921
908 def parentrevs(self, rev):
922 def parentrevs(self, rev):
909 try:
923 try:
910 entry = self.index[rev]
924 entry = self.index[rev]
911 except IndexError:
925 except IndexError:
912 if rev == wdirrev:
926 if rev == wdirrev:
913 raise error.WdirUnsupported
927 raise error.WdirUnsupported
914 raise
928 raise
915 if entry[5] == nullrev:
929 if entry[5] == nullrev:
916 return entry[6], entry[5]
930 return entry[6], entry[5]
917 else:
931 else:
918 return entry[5], entry[6]
932 return entry[5], entry[6]
919
933
920 # fast parentrevs(rev) where rev isn't filtered
934 # fast parentrevs(rev) where rev isn't filtered
921 _uncheckedparentrevs = parentrevs
935 _uncheckedparentrevs = parentrevs
922
936
923 def node(self, rev):
937 def node(self, rev):
924 try:
938 try:
925 return self.index[rev][7]
939 return self.index[rev][7]
926 except IndexError:
940 except IndexError:
927 if rev == wdirrev:
941 if rev == wdirrev:
928 raise error.WdirUnsupported
942 raise error.WdirUnsupported
929 raise
943 raise
930
944
931 # Derived from index values.
945 # Derived from index values.
932
946
933 def end(self, rev):
947 def end(self, rev):
934 return self.start(rev) + self.length(rev)
948 return self.start(rev) + self.length(rev)
935
949
936 def parents(self, node):
950 def parents(self, node):
937 i = self.index
951 i = self.index
938 d = i[self.rev(node)]
952 d = i[self.rev(node)]
939 # inline node() to avoid function call overhead
953 # inline node() to avoid function call overhead
940 if d[5] == self.nullid:
954 if d[5] == self.nullid:
941 return i[d[6]][7], i[d[5]][7]
955 return i[d[6]][7], i[d[5]][7]
942 else:
956 else:
943 return i[d[5]][7], i[d[6]][7]
957 return i[d[5]][7], i[d[6]][7]
944
958
945 def chainlen(self, rev):
959 def chainlen(self, rev):
946 return self._chaininfo(rev)[0]
960 return self._chaininfo(rev)[0]
947
961
    def _chaininfo(self, rev):
        """Return ``(chain length, compressed delta-chain size)`` for ``rev``.

        Results are memoized in ``self._chaininfocache``.
        """
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        # Walk the delta chain: index column 3 is the delta base; without
        # general delta the base is always the previous revision.
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            # Reuse a cached prefix of the chain when one is available.
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r
978
992
979 def _deltachain(self, rev, stoprev=None):
993 def _deltachain(self, rev, stoprev=None):
980 """Obtain the delta chain for a revision.
994 """Obtain the delta chain for a revision.
981
995
982 ``stoprev`` specifies a revision to stop at. If not specified, we
996 ``stoprev`` specifies a revision to stop at. If not specified, we
983 stop at the base of the chain.
997 stop at the base of the chain.
984
998
985 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
999 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
986 revs in ascending order and ``stopped`` is a bool indicating whether
1000 revs in ascending order and ``stopped`` is a bool indicating whether
987 ``stoprev`` was hit.
1001 ``stoprev`` was hit.
988 """
1002 """
989 # Try C implementation.
1003 # Try C implementation.
990 try:
1004 try:
991 return self.index.deltachain(rev, stoprev, self._generaldelta)
1005 return self.index.deltachain(rev, stoprev, self._generaldelta)
992 except AttributeError:
1006 except AttributeError:
993 pass
1007 pass
994
1008
995 chain = []
1009 chain = []
996
1010
997 # Alias to prevent attribute lookup in tight loop.
1011 # Alias to prevent attribute lookup in tight loop.
998 index = self.index
1012 index = self.index
999 generaldelta = self._generaldelta
1013 generaldelta = self._generaldelta
1000
1014
1001 iterrev = rev
1015 iterrev = rev
1002 e = index[iterrev]
1016 e = index[iterrev]
1003 while iterrev != e[3] and iterrev != stoprev:
1017 while iterrev != e[3] and iterrev != stoprev:
1004 chain.append(iterrev)
1018 chain.append(iterrev)
1005 if generaldelta:
1019 if generaldelta:
1006 iterrev = e[3]
1020 iterrev = e[3]
1007 else:
1021 else:
1008 iterrev -= 1
1022 iterrev -= 1
1009 e = index[iterrev]
1023 e = index[iterrev]
1010
1024
1011 if iterrev == stoprev:
1025 if iterrev == stoprev:
1012 stopped = True
1026 stopped = True
1013 else:
1027 else:
1014 chain.append(iterrev)
1028 chain.append(iterrev)
1015 stopped = False
1029 stopped = False
1016
1030
1017 chain.reverse()
1031 chain.reverse()
1018 return chain, stopped
1032 return chain, stopped
1019
1033
1020 def ancestors(self, revs, stoprev=0, inclusive=False):
1034 def ancestors(self, revs, stoprev=0, inclusive=False):
1021 """Generate the ancestors of 'revs' in reverse revision order.
1035 """Generate the ancestors of 'revs' in reverse revision order.
1022 Does not generate revs lower than stoprev.
1036 Does not generate revs lower than stoprev.
1023
1037
1024 See the documentation for ancestor.lazyancestors for more details."""
1038 See the documentation for ancestor.lazyancestors for more details."""
1025
1039
1026 # first, make sure start revisions aren't filtered
1040 # first, make sure start revisions aren't filtered
1027 revs = list(revs)
1041 revs = list(revs)
1028 checkrev = self.node
1042 checkrev = self.node
1029 for r in revs:
1043 for r in revs:
1030 checkrev(r)
1044 checkrev(r)
1031 # and we're sure ancestors aren't filtered as well
1045 # and we're sure ancestors aren't filtered as well
1032
1046
1033 if rustancestor is not None:
1047 if rustancestor is not None:
1034 lazyancestors = rustancestor.LazyAncestors
1048 lazyancestors = rustancestor.LazyAncestors
1035 arg = self.index
1049 arg = self.index
1036 else:
1050 else:
1037 lazyancestors = ancestor.lazyancestors
1051 lazyancestors = ancestor.lazyancestors
1038 arg = self._uncheckedparentrevs
1052 arg = self._uncheckedparentrevs
1039 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1053 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1040
1054
1041 def descendants(self, revs):
1055 def descendants(self, revs):
1042 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1056 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1043
1057
    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

        ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [self.nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            # Set-like view over a lazily-evaluated iterable plus explicitly
            # added members; avoids materializing all ancestors up front.
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if not r in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]
1107
1121
1108 def incrementalmissingrevs(self, common=None):
1122 def incrementalmissingrevs(self, common=None):
1109 """Return an object that can be used to incrementally compute the
1123 """Return an object that can be used to incrementally compute the
1110 revision numbers of the ancestors of arbitrary sets that are not
1124 revision numbers of the ancestors of arbitrary sets that are not
1111 ancestors of common. This is an ancestor.incrementalmissingancestors
1125 ancestors of common. This is an ancestor.incrementalmissingancestors
1112 object.
1126 object.
1113
1127
1114 'common' is a list of revision numbers. If common is not supplied, uses
1128 'common' is a list of revision numbers. If common is not supplied, uses
1115 nullrev.
1129 nullrev.
1116 """
1130 """
1117 if common is None:
1131 if common is None:
1118 common = [nullrev]
1132 common = [nullrev]
1119
1133
1120 if rustancestor is not None:
1134 if rustancestor is not None:
1121 return rustancestor.MissingAncestors(self.index, common)
1135 return rustancestor.MissingAncestors(self.index, common)
1122 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1136 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1123
1137
1124 def findmissingrevs(self, common=None, heads=None):
1138 def findmissingrevs(self, common=None, heads=None):
1125 """Return the revision numbers of the ancestors of heads that
1139 """Return the revision numbers of the ancestors of heads that
1126 are not ancestors of common.
1140 are not ancestors of common.
1127
1141
1128 More specifically, return a list of revision numbers corresponding to
1142 More specifically, return a list of revision numbers corresponding to
1129 nodes N such that every N satisfies the following constraints:
1143 nodes N such that every N satisfies the following constraints:
1130
1144
1131 1. N is an ancestor of some node in 'heads'
1145 1. N is an ancestor of some node in 'heads'
1132 2. N is not an ancestor of any node in 'common'
1146 2. N is not an ancestor of any node in 'common'
1133
1147
1134 The list is sorted by revision number, meaning it is
1148 The list is sorted by revision number, meaning it is
1135 topologically sorted.
1149 topologically sorted.
1136
1150
1137 'heads' and 'common' are both lists of revision numbers. If heads is
1151 'heads' and 'common' are both lists of revision numbers. If heads is
1138 not supplied, uses all of the revlog's heads. If common is not
1152 not supplied, uses all of the revlog's heads. If common is not
1139 supplied, uses nullid."""
1153 supplied, uses nullid."""
1140 if common is None:
1154 if common is None:
1141 common = [nullrev]
1155 common = [nullrev]
1142 if heads is None:
1156 if heads is None:
1143 heads = self.headrevs()
1157 heads = self.headrevs()
1144
1158
1145 inc = self.incrementalmissingrevs(common=common)
1159 inc = self.incrementalmissingrevs(common=common)
1146 return inc.missingancestors(heads)
1160 return inc.missingancestors(heads)
1147
1161
1148 def findmissing(self, common=None, heads=None):
1162 def findmissing(self, common=None, heads=None):
1149 """Return the ancestors of heads that are not ancestors of common.
1163 """Return the ancestors of heads that are not ancestors of common.
1150
1164
1151 More specifically, return a list of nodes N such that every N
1165 More specifically, return a list of nodes N such that every N
1152 satisfies the following constraints:
1166 satisfies the following constraints:
1153
1167
1154 1. N is an ancestor of some node in 'heads'
1168 1. N is an ancestor of some node in 'heads'
1155 2. N is not an ancestor of any node in 'common'
1169 2. N is not an ancestor of any node in 'common'
1156
1170
1157 The list is sorted by revision number, meaning it is
1171 The list is sorted by revision number, meaning it is
1158 topologically sorted.
1172 topologically sorted.
1159
1173
1160 'heads' and 'common' are both lists of node IDs. If heads is
1174 'heads' and 'common' are both lists of node IDs. If heads is
1161 not supplied, uses all of the revlog's heads. If common is not
1175 not supplied, uses all of the revlog's heads. If common is not
1162 supplied, uses nullid."""
1176 supplied, uses nullid."""
1163 if common is None:
1177 if common is None:
1164 common = [self.nullid]
1178 common = [self.nullid]
1165 if heads is None:
1179 if heads is None:
1166 heads = self.heads()
1180 heads = self.heads()
1167
1181
1168 common = [self.rev(n) for n in common]
1182 common = [self.rev(n) for n in common]
1169 heads = [self.rev(n) for n in heads]
1183 heads = [self.rev(n) for n in heads]
1170
1184
1171 inc = self.incrementalmissingrevs(common=common)
1185 inc = self.incrementalmissingrevs(common=common)
1172 return [self.node(r) for r in inc.missingancestors(heads)]
1186 return [self.node(r) for r in inc.missingancestors(heads)]
1173
1187
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [self.nullid]  # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return (
                [self.node(r) for r in self],
                [self.nullid],
                list(self.heads()),
            )
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == self.nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n)  # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update(
                            [p for p in self.parents(n) if p != self.nullid]
                        )
                    elif n in heads:  # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [self.nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked is descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants. (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        # Keep only the heads actually reached from the roots.
        heads = [head for head, flag in pycompat.iteritems(heads) if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
1333
1347
1334 def headrevs(self, revs=None):
1348 def headrevs(self, revs=None):
1335 if revs is None:
1349 if revs is None:
1336 try:
1350 try:
1337 return self.index.headrevs()
1351 return self.index.headrevs()
1338 except AttributeError:
1352 except AttributeError:
1339 return self._headrevs()
1353 return self._headrevs()
1340 if rustdagop is not None:
1354 if rustdagop is not None:
1341 return rustdagop.headrevs(self.index, revs)
1355 return rustdagop.headrevs(self.index, revs)
1342 return dagop.headrevs(revs, self._uncheckedparentrevs)
1356 return dagop.headrevs(revs, self._uncheckedparentrevs)
1343
1357
1344 def computephases(self, roots):
1358 def computephases(self, roots):
1345 return self.index.computephasesmapsets(roots)
1359 return self.index.computephasesmapsets(roots)
1346
1360
1347 def _headrevs(self):
1361 def _headrevs(self):
1348 count = len(self)
1362 count = len(self)
1349 if not count:
1363 if not count:
1350 return [nullrev]
1364 return [nullrev]
1351 # we won't iter over filtered rev so nobody is a head at start
1365 # we won't iter over filtered rev so nobody is a head at start
1352 ishead = [0] * (count + 1)
1366 ishead = [0] * (count + 1)
1353 index = self.index
1367 index = self.index
1354 for r in self:
1368 for r in self:
1355 ishead[r] = 1 # I may be an head
1369 ishead[r] = 1 # I may be an head
1356 e = index[r]
1370 e = index[r]
1357 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1371 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1358 return [r for r, val in enumerate(ishead) if val]
1372 return [r for r, val in enumerate(ishead) if val]
1359
1373
1360 def heads(self, start=None, stop=None):
1374 def heads(self, start=None, stop=None):
1361 """return the list of all nodes that have no children
1375 """return the list of all nodes that have no children
1362
1376
1363 if start is specified, only heads that are descendants of
1377 if start is specified, only heads that are descendants of
1364 start will be returned
1378 start will be returned
1365 if stop is specified, it will consider all the revs from stop
1379 if stop is specified, it will consider all the revs from stop
1366 as if they had no children
1380 as if they had no children
1367 """
1381 """
1368 if start is None and stop is None:
1382 if start is None and stop is None:
1369 if not len(self):
1383 if not len(self):
1370 return [self.nullid]
1384 return [self.nullid]
1371 return [self.node(r) for r in self.headrevs()]
1385 return [self.node(r) for r in self.headrevs()]
1372
1386
1373 if start is None:
1387 if start is None:
1374 start = nullrev
1388 start = nullrev
1375 else:
1389 else:
1376 start = self.rev(start)
1390 start = self.rev(start)
1377
1391
1378 stoprevs = {self.rev(n) for n in stop or []}
1392 stoprevs = {self.rev(n) for n in stop or []}
1379
1393
1380 revs = dagop.headrevssubset(
1394 revs = dagop.headrevssubset(
1381 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1395 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1382 )
1396 )
1383
1397
1384 return [self.node(rev) for rev in revs]
1398 return [self.node(rev) for rev in revs]
1385
1399
def children(self, node):
    """find the children of a given node"""
    found = []
    rev = self.rev(node)
    # scan every revision after ``rev``: a child necessarily has a
    # higher revision number than its parents
    for r in self.revs(start=rev + 1):
        parents = [pr for pr in self.parentrevs(r) if pr != nullrev]
        if parents:
            # one entry per parent slot that matches, like the original
            found.extend(self.node(r) for pr in parents if pr == rev)
        elif rev == nullrev:
            # parentless revisions are children of the null revision
            found.append(self.node(r))
    return found
1399
1413
def commonancestorsheads(self, a, b):
    """calculate all the heads of the common ancestors of nodes a and b"""
    rev_a = self.rev(a)
    rev_b = self.rev(b)
    ancs = self._commonancestorsheads(rev_a, rev_b)
    # map the resulting revisions back to nodes
    return pycompat.maplist(self.node, ancs)
1405
1419
1406 def _commonancestorsheads(self, *revs):
1420 def _commonancestorsheads(self, *revs):
1407 """calculate all the heads of the common ancestors of revs"""
1421 """calculate all the heads of the common ancestors of revs"""
1408 try:
1422 try:
1409 ancs = self.index.commonancestorsheads(*revs)
1423 ancs = self.index.commonancestorsheads(*revs)
1410 except (AttributeError, OverflowError): # C implementation failed
1424 except (AttributeError, OverflowError): # C implementation failed
1411 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1425 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1412 return ancs
1426 return ancs
1413
1427
def isancestor(self, a, b):
    """return True if node a is an ancestor of node b

    A revision is considered an ancestor of itself."""
    # translate both nodes to revision numbers, then defer to the
    # revision-based implementation
    return self.isancestorrev(self.rev(a), self.rev(b))
1420
1434
def isancestorrev(self, a, b):
    """return True if revision a is an ancestor of revision b

    A revision is considered an ancestor of itself.

    The implementation of this is trivial but the use of
    reachableroots is not."""
    if a == nullrev:
        # everything descends from the null revision
        return True
    if a == b:
        return True
    if a > b:
        # an ancestor always has a smaller revision number
        return False
    return bool(self.reachableroots(a, [b], [a], includepath=False))
1435
1449
def reachableroots(self, minroot, heads, roots, includepath=False):
    """return (heads(::(<roots> and <roots>::<heads>)))

    If includepath is True, return (<roots>::<heads>).

    ``minroot`` is a lower bound on the revisions in ``roots``; ``heads``
    and ``roots`` are lists of revision numbers.
    """
    try:
        # fast path: native implementation provided by the C/Rust index
        return self.index.reachableroots2(
            minroot, heads, roots, includepath
        )
    except AttributeError:
        # pure-Python index; NOTE the pure helper takes ``roots`` before
        # ``heads``, the opposite order of reachableroots2
        return dagop._reachablerootspure(
            self.parentrevs, minroot, roots, heads, includepath
        )
1448
1462
def ancestor(self, a, b):
    """calculate the "best" common ancestor of nodes a and b"""

    a, b = self.rev(a), self.rev(b)
    try:
        ancs = self.index.ancestors(a, b)
    except (AttributeError, OverflowError):
        # pure-Python index or revisions too large for the native code
        ancs = ancestor.ancestors(self.parentrevs, a, b)
    if not ancs:
        return self.nullid
    # choose a consistent winner when there's a tie
    return min(map(self.node, ancs))
1461
1475
def _match(self, id):
    """Resolve ``id`` to a binary node through exact matches only.

    ``id`` may be an integer revision, a binary node, a byte string
    holding a decimal revision number, or a full hex node.  Returns the
    binary node, or None when no exact interpretation matched (callers
    then fall back to prefix matching).
    """
    if isinstance(id, int):
        # rev
        return self.node(id)
    if len(id) == self.nodeconstants.nodelen:
        # possibly a binary node
        # odds of a binary node being all hex in ASCII are 1 in 10**25
        try:
            node = id
            self.rev(node)  # quick search the index
            return node
        except error.LookupError:
            pass  # may be partial hex id
    try:
        # str(rev)
        rev = int(id)
        if b"%d" % rev != id:
            # reject strings int() accepts but that do not round-trip,
            # e.g. b'01' or b'+1'
            raise ValueError
        if rev < 0:
            # negative revisions count from the end
            rev = len(self) + rev
        if rev < 0 or rev >= len(self):
            raise ValueError
        return self.node(rev)
    except (ValueError, OverflowError):
        pass
    if len(id) == 2 * self.nodeconstants.nodelen:
        try:
            # a full hex nodeid?
            node = bin(id)
            self.rev(node)
            return node
        except (TypeError, error.LookupError):
            pass
1495
1509
def _partialmatch(self, id):
    """Resolve a hex prefix ``id`` to a unique node.

    Returns the node, or None when no revision matches.  Raises
    WdirUnsupported when the prefix can only designate the fake working
    directory node, and AmbiguousPrefixLookupError when several nodes
    share the prefix.
    """
    # we don't care wdirfilenodeids as they should be always full hash
    maybewdir = self.nodeconstants.wdirhex.startswith(id)
    try:
        # fast path: C radix-tree lookup on the index
        partial = self.index.partialmatch(id)
        if partial and self.hasnode(partial):
            if maybewdir:
                # single 'ff...' match in radix tree, ambiguous with wdir
                raise error.RevlogError
            return partial
        if maybewdir:
            # no 'ff...' match in radix tree, wdir identified
            raise error.WdirUnsupported
        return None
    except error.RevlogError:
        # parsers.c radix tree lookup gave multiple matches
        # fast path: for unfiltered changelog, radix tree is accurate
        if not getattr(self, 'filteredrevs', None):
            raise error.AmbiguousPrefixLookupError(
                id, self.display_id, _(b'ambiguous identifier')
            )
        # fall through to slow path that filters hidden revisions
    except (AttributeError, ValueError):
        # we are pure python, or key was too short to search radix tree
        pass

    if id in self._pcache:
        # previously resolved prefix
        return self._pcache[id]

    if len(id) <= 40:
        try:
            # hex(node)[:...]
            l = len(id) // 2  # grab an even number of digits
            prefix = bin(id[: l * 2])
            nl = [e[7] for e in self.index if e[7].startswith(prefix)]
            # refine with the (possibly odd-length) full prefix and drop
            # filtered revisions
            nl = [
                n for n in nl if hex(n).startswith(id) and self.hasnode(n)
            ]
            if self.nodeconstants.nullhex.startswith(id):
                nl.append(self.nullid)
            if len(nl) > 0:
                if len(nl) == 1 and not maybewdir:
                    self._pcache[id] = nl[0]
                    return nl[0]
                raise error.AmbiguousPrefixLookupError(
                    id, self.display_id, _(b'ambiguous identifier')
                )
            if maybewdir:
                raise error.WdirUnsupported
            return None
        except TypeError:
            pass
1548
1562
def lookup(self, id):
    """locate a node based on:
    - revision number or str(revision number)
    - nodeid or subset of hex nodeid
    """
    # exact forms first (int rev, full node, decimal byte string)
    node = self._match(id)
    if node is not None:
        return node
    # then an unambiguous hex prefix
    node = self._partialmatch(id)
    if node:
        return node

    raise error.LookupError(id, self.display_id, _(b'no match found'))
1562
1576
def shortest(self, node, minlength=1):
    """Find the shortest unambiguous prefix that matches node.

    The result is at least ``minlength`` hex digits long and is also
    disambiguated against the fake working-directory identifier.
    Raises LookupError when ``node`` is not present in this revlog.
    """

    def isvalid(prefix):
        # True when ``prefix`` resolves unambiguously (possibly to the
        # working-directory pseudo-node)
        try:
            matchednode = self._partialmatch(prefix)
        except error.AmbiguousPrefixLookupError:
            return False
        except error.WdirUnsupported:
            # single 'ff...' match
            return True
        if matchednode is None:
            raise error.LookupError(node, self.display_id, _(b'no node'))
        return True

    def maybewdir(prefix):
        # an all-'f' prefix could still designate the wdir node
        return all(c == b'f' for c in pycompat.iterbytestr(prefix))

    hexnode = hex(node)

    def disambiguate(hexnode, minlength):
        """Disambiguate against wdirid."""
        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if not maybewdir(prefix):
                return prefix

    if not getattr(self, 'filteredrevs', None):
        try:
            # fast path: native index computes the shortest prefix
            length = max(self.index.shortest(node), minlength)
            return disambiguate(hexnode, length)
        except error.RevlogError:
            if node != self.nodeconstants.wdirid:
                raise error.LookupError(
                    node, self.display_id, _(b'no node')
                )
        except AttributeError:
            # Fall through to pure code
            pass

    if node == self.nodeconstants.wdirid:
        # wdir itself: the shortest all-'f' prefix that is unambiguous
        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return prefix

    for length in range(minlength, len(hexnode) + 1):
        prefix = hexnode[:length]
        if isvalid(prefix):
            return disambiguate(hexnode, length)
1613
1627
def cmp(self, node, text):
    """compare text with a given file revision

    returns True if text is different than what is stored.
    """
    # compare hashes instead of contents: cheaper and sufficient
    p1, p2 = self.parents(node)
    expected = storageutil.hashrevisionsha1(text, p1, p2)
    return expected != node
1621
1635
1622 def _cachesegment(self, offset, data):
1636 def _cachesegment(self, offset, data):
1623 """Add a segment to the revlog cache.
1637 """Add a segment to the revlog cache.
1624
1638
1625 Accepts an absolute offset and the data that is at that location.
1639 Accepts an absolute offset and the data that is at that location.
1626 """
1640 """
1627 o, d = self._chunkcache
1641 o, d = self._chunkcache
1628 # try to add to existing cache
1642 # try to add to existing cache
1629 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1643 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1630 self._chunkcache = o, d + data
1644 self._chunkcache = o, d + data
1631 else:
1645 else:
1632 self._chunkcache = offset, data
1646 self._chunkcache = offset, data
1633
1647
def _readsegment(self, offset, length, df=None):
    """Load a segment of raw data from the revlog.

    Accepts an absolute offset, length to read, and an optional existing
    file handle to read from.

    If an existing file handle is passed, it will be seeked and the
    original seek position will NOT be restored.

    Returns a str or buffer of raw byte data.

    Raises if the requested number of bytes could not be read.
    """
    # Cache data both forward and backward around the requested
    # data, in a fixed size window. This helps speed up operations
    # involving reading the revlog backwards.
    cachesize = self._chunkcachesize
    # cachesize is assumed to be a power of two, so these masks round
    # the window start down and the window end up to a cache boundary
    realoffset = offset & ~(cachesize - 1)
    reallength = (
        (offset + length + cachesize) & ~(cachesize - 1)
    ) - realoffset
    with self._datareadfp(df) as df:
        df.seek(realoffset)
        d = df.read(reallength)

    self._cachesegment(realoffset, d)
    if offset != realoffset or reallength != length:
        # window is larger than the request: validate we actually got
        # enough bytes, then hand back a zero-copy slice
        startoffset = offset - realoffset
        if len(d) - startoffset < length:
            raise error.RevlogError(
                _(
                    b'partial read of revlog %s; expected %d bytes from '
                    b'offset %d, got %d'
                )
                % (
                    self._indexfile if self._inline else self._datafile,
                    length,
                    offset,
                    len(d) - startoffset,
                )
            )

        return util.buffer(d, startoffset, length)

    if len(d) < length:
        raise error.RevlogError(
            _(
                b'partial read of revlog %s; expected %d bytes from offset '
                b'%d, got %d'
            )
            % (
                self._indexfile if self._inline else self._datafile,
                length,
                offset,
                len(d),
            )
        )

    return d
1693
1707
1694 def _getsegment(self, offset, length, df=None):
1708 def _getsegment(self, offset, length, df=None):
1695 """Obtain a segment of raw data from the revlog.
1709 """Obtain a segment of raw data from the revlog.
1696
1710
1697 Accepts an absolute offset, length of bytes to obtain, and an
1711 Accepts an absolute offset, length of bytes to obtain, and an
1698 optional file handle to the already-opened revlog. If the file
1712 optional file handle to the already-opened revlog. If the file
1699 handle is used, it's original seek position will not be preserved.
1713 handle is used, it's original seek position will not be preserved.
1700
1714
1701 Requests for data may be returned from a cache.
1715 Requests for data may be returned from a cache.
1702
1716
1703 Returns a str or a buffer instance of raw byte data.
1717 Returns a str or a buffer instance of raw byte data.
1704 """
1718 """
1705 o, d = self._chunkcache
1719 o, d = self._chunkcache
1706 l = len(d)
1720 l = len(d)
1707
1721
1708 # is it in the cache?
1722 # is it in the cache?
1709 cachestart = offset - o
1723 cachestart = offset - o
1710 cacheend = cachestart + length
1724 cacheend = cachestart + length
1711 if cachestart >= 0 and cacheend <= l:
1725 if cachestart >= 0 and cacheend <= l:
1712 if cachestart == 0 and cacheend == l:
1726 if cachestart == 0 and cacheend == l:
1713 return d # avoid a copy
1727 return d # avoid a copy
1714 return util.buffer(d, cachestart, cacheend - cachestart)
1728 return util.buffer(d, cachestart, cacheend - cachestart)
1715
1729
1716 return self._readsegment(offset, length, df=df)
1730 return self._readsegment(offset, length, df=df)
1717
1731
def _getsegmentforrevs(self, startrev, endrev, df=None):
    """Obtain a segment of raw data corresponding to a range of revisions.

    Accepts the start and end revisions and an optional already-open
    file handle to be used for reading. If the file handle is read, its
    seek position will not be preserved.

    Requests for data may be satisfied by a cache.

    Returns a 2-tuple of (offset, data) for the requested range of
    revisions. Offset is the integer offset from the beginning of the
    revlog and data is a str or buffer of the raw byte data.

    Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
    to determine where each revision's data begins and ends.
    """
    # Inlined self.start(startrev) & self.end(endrev) for perf reasons
    # (functions are expensive).
    index = self.index
    istart = index[startrev]
    # entry[0] packs the data offset in its upper 48 bits
    start = int(istart[0] >> 16)
    if startrev == endrev:
        end = start + istart[1]
    else:
        iend = index[endrev]
        end = int(iend[0] >> 16) + iend[1]

    if self._inline:
        # inline revlogs interleave an index entry before each data chunk
        start += (startrev + 1) * self.index.entry_size
        end += (endrev + 1) * self.index.entry_size
    length = end - start

    return start, self._getsegment(start, length, df=df)
1751
1765
def _chunk(self, rev, df=None):
    """Obtain a single decompressed chunk for a revision.

    Accepts an integer revision and an optional already-open file handle
    to be used for reading. If used, the seek position of the file will not
    be preserved.

    Returns a str holding uncompressed data for the requested revision.
    """
    compression_mode = self.index[rev][10]
    data = self._getsegmentforrevs(rev, rev, df=df)[1]
    if compression_mode == COMP_MODE_PLAIN:
        # stored uncompressed
        return data
    if compression_mode == COMP_MODE_INLINE:
        # compression engine identified by the chunk's own header byte
        return self.decompress(data)
    msg = 'unknown compression mode %d'
    msg %= compression_mode
    raise error.RevlogError(msg)
1771
1785
def _chunks(self, revs, df=None, targetsize=None):
    """Obtain decompressed chunks for the specified revisions.

    Accepts an iterable of numeric revisions that are assumed to be in
    ascending order. Also accepts an optional already-open file handle
    to be used for reading. If used, the seek position of the file will
    not be preserved.

    This function is similar to calling ``self._chunk()`` multiple times,
    but is faster.

    Returns a list with decompressed data for each requested revision.
    """
    if not revs:
        return []
    # bind hot attributes to locals: this loop is performance critical
    start = self.start
    length = self.length
    inline = self._inline
    iosize = self.index.entry_size
    buffer = util.buffer

    l = []
    ladd = l.append

    if not self._withsparseread:
        slicedchunks = (revs,)
    else:
        # split the revisions into densely-packed read windows
        slicedchunks = deltautil.slicechunk(
            self, revs, targetsize=targetsize
        )

    for revschunk in slicedchunks:
        firstrev = revschunk[0]
        # Skip trailing revisions with empty diff
        for lastrev in revschunk[::-1]:
            if length(lastrev) != 0:
                break

        try:
            offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
        except OverflowError:
            # issue4215 - we can't cache a run of chunks greater than
            # 2G on Windows
            return [self._chunk(rev, df=df) for rev in revschunk]

        decomp = self.decompress
        for rev in revschunk:
            chunkstart = start(rev)
            if inline:
                # skip the index entries interleaved with the data
                chunkstart += (rev + 1) * iosize
            chunklength = length(rev)
            comp_mode = self.index[rev][10]
            c = buffer(data, chunkstart - offset, chunklength)
            if comp_mode == COMP_MODE_PLAIN:
                ladd(c)
            elif comp_mode == COMP_MODE_INLINE:
                ladd(decomp(c))
            else:
                msg = 'unknown compression mode %d'
                msg %= comp_mode
                raise error.RevlogError(msg)

    return l
1835
1849
1836 def _chunkclear(self):
1850 def _chunkclear(self):
1837 """Clear the raw chunk cache."""
1851 """Clear the raw chunk cache."""
1838 self._chunkcache = (0, b'')
1852 self._chunkcache = (0, b'')
1839
1853
def deltaparent(self, rev):
    """return deltaparent of the given revision"""
    base = self.index[rev][3]
    if base == rev:
        # a self-referencing base marks a full snapshot: no delta parent
        return nullrev
    if self._generaldelta:
        return base
    # legacy layout: deltas are always against the previous revision
    return rev - 1
1849
1863
def issnapshot(self, rev):
    """tells whether rev is a snapshot"""
    if not self._sparserevlog:
        # without sparse-revlog, only full snapshots (delta against the
        # null revision) exist
        return self.deltaparent(rev) == nullrev
    elif util.safehasattr(self.index, b'issnapshot'):
        # directly assign the method to cache the testing and access
        self.issnapshot = self.index.issnapshot
        return self.issnapshot(rev)
    if rev == nullrev:
        return True
    entry = self.index[rev]
    base = entry[3]
    if base == rev:
        # self-referencing base: full snapshot
        return True
    if base == nullrev:
        return True
    p1 = entry[5]
    p2 = entry[6]
    if base == p1 or base == p2:
        # delta against a parent is a regular delta, not a snapshot
        return False
    # intermediate snapshots are deltas against other snapshots
    return self.issnapshot(base)
1871
1885
def snapshotdepth(self, rev):
    """number of snapshot in the chain before this one

    Raises ProgrammingError when ``rev`` is not a snapshot.
    """
    if not self.issnapshot(rev):
        # interpolate the revision number into the message: the format
        # string was previously raised verbatim with a literal b'%d'
        raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
    # the delta chain of a snapshot contains only snapshots
    return len(self._deltachain(rev)[0]) - 1
1877
1891
def revdiff(self, rev1, rev2):
    """return or calculate a delta between two revisions

    The delta calculated is in binary form and is intended to be written to
    revlog data directly. So this function needs raw revision data.
    """
    # when rev2's stored delta is already against rev1, reuse it as-is
    if rev1 != nullrev and self.deltaparent(rev2) == rev1:
        return bytes(self._chunk(rev2))

    # otherwise compute a fresh binary diff of the raw revision data
    return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1888
1902
def _processflags(self, text, flags, operation, raw=False):
    """deprecated entry point to access flag processors"""
    msg = b'_processflag(...) use the specialized variant'
    util.nouideprecwarn(msg, b'5.2', stacklevel=2)
    # dispatch to the specialized flagutil helpers
    if raw:
        return text, flagutil.processflagsraw(self, text, flags)
    if operation == b'read':
        return flagutil.processflagsread(self, text, flags)
    # write operation
    return flagutil.processflagswrite(self, text, flags)
1899
1913
1900 def revision(self, nodeorrev, _df=None, raw=False):
1914 def revision(self, nodeorrev, _df=None, raw=False):
1901 """return an uncompressed revision of a given node or revision
1915 """return an uncompressed revision of a given node or revision
1902 number.
1916 number.
1903
1917
1904 _df - an existing file handle to read from. (internal-only)
1918 _df - an existing file handle to read from. (internal-only)
1905 raw - an optional argument specifying if the revision data is to be
1919 raw - an optional argument specifying if the revision data is to be
1906 treated as raw data when applying flag transforms. 'raw' should be set
1920 treated as raw data when applying flag transforms. 'raw' should be set
1907 to True when generating changegroups or in debug commands.
1921 to True when generating changegroups or in debug commands.
1908 """
1922 """
1909 if raw:
1923 if raw:
1910 msg = (
1924 msg = (
1911 b'revlog.revision(..., raw=True) is deprecated, '
1925 b'revlog.revision(..., raw=True) is deprecated, '
1912 b'use revlog.rawdata(...)'
1926 b'use revlog.rawdata(...)'
1913 )
1927 )
1914 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1928 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1915 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1929 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1916
1930
1917 def sidedata(self, nodeorrev, _df=None):
1931 def sidedata(self, nodeorrev, _df=None):
1918 """a map of extra data related to the changeset but not part of the hash
1932 """a map of extra data related to the changeset but not part of the hash
1919
1933
1920 This function currently return a dictionary. However, more advanced
1934 This function currently return a dictionary. However, more advanced
1921 mapping object will likely be used in the future for a more
1935 mapping object will likely be used in the future for a more
1922 efficient/lazy code.
1936 efficient/lazy code.
1923 """
1937 """
1924 return self._revisiondata(nodeorrev, _df)[1]
1938 return self._revisiondata(nodeorrev, _df)[1]
1925
1939
1926 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1940 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1927 # deal with <nodeorrev> argument type
1941 # deal with <nodeorrev> argument type
1928 if isinstance(nodeorrev, int):
1942 if isinstance(nodeorrev, int):
1929 rev = nodeorrev
1943 rev = nodeorrev
1930 node = self.node(rev)
1944 node = self.node(rev)
1931 else:
1945 else:
1932 node = nodeorrev
1946 node = nodeorrev
1933 rev = None
1947 rev = None
1934
1948
1935 # fast path the special `nullid` rev
1949 # fast path the special `nullid` rev
1936 if node == self.nullid:
1950 if node == self.nullid:
1937 return b"", {}
1951 return b"", {}
1938
1952
1939 # ``rawtext`` is the text as stored inside the revlog. Might be the
1953 # ``rawtext`` is the text as stored inside the revlog. Might be the
1940 # revision or might need to be processed to retrieve the revision.
1954 # revision or might need to be processed to retrieve the revision.
1941 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1955 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1942
1956
1943 if self.hassidedata:
1957 if self.hassidedata:
1944 if rev is None:
1958 if rev is None:
1945 rev = self.rev(node)
1959 rev = self.rev(node)
1946 sidedata = self._sidedata(rev)
1960 sidedata = self._sidedata(rev)
1947 else:
1961 else:
1948 sidedata = {}
1962 sidedata = {}
1949
1963
1950 if raw and validated:
1964 if raw and validated:
1951 # if we don't want to process the raw text and that raw
1965 # if we don't want to process the raw text and that raw
1952 # text is cached, we can exit early.
1966 # text is cached, we can exit early.
1953 return rawtext, sidedata
1967 return rawtext, sidedata
1954 if rev is None:
1968 if rev is None:
1955 rev = self.rev(node)
1969 rev = self.rev(node)
1956 # the revlog's flag for this revision
1970 # the revlog's flag for this revision
1957 # (usually alter its state or content)
1971 # (usually alter its state or content)
1958 flags = self.flags(rev)
1972 flags = self.flags(rev)
1959
1973
1960 if validated and flags == REVIDX_DEFAULT_FLAGS:
1974 if validated and flags == REVIDX_DEFAULT_FLAGS:
1961 # no extra flags set, no flag processor runs, text = rawtext
1975 # no extra flags set, no flag processor runs, text = rawtext
1962 return rawtext, sidedata
1976 return rawtext, sidedata
1963
1977
1964 if raw:
1978 if raw:
1965 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1979 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1966 text = rawtext
1980 text = rawtext
1967 else:
1981 else:
1968 r = flagutil.processflagsread(self, rawtext, flags)
1982 r = flagutil.processflagsread(self, rawtext, flags)
1969 text, validatehash = r
1983 text, validatehash = r
1970 if validatehash:
1984 if validatehash:
1971 self.checkhash(text, node, rev=rev)
1985 self.checkhash(text, node, rev=rev)
1972 if not validated:
1986 if not validated:
1973 self._revisioncache = (node, rev, rawtext)
1987 self._revisioncache = (node, rev, rawtext)
1974
1988
1975 return text, sidedata
1989 return text, sidedata
1976
1990
1977 def _rawtext(self, node, rev, _df=None):
1991 def _rawtext(self, node, rev, _df=None):
1978 """return the possibly unvalidated rawtext for a revision
1992 """return the possibly unvalidated rawtext for a revision
1979
1993
1980 returns (rev, rawtext, validated)
1994 returns (rev, rawtext, validated)
1981 """
1995 """
1982
1996
1983 # revision in the cache (could be useful to apply delta)
1997 # revision in the cache (could be useful to apply delta)
1984 cachedrev = None
1998 cachedrev = None
1985 # An intermediate text to apply deltas to
1999 # An intermediate text to apply deltas to
1986 basetext = None
2000 basetext = None
1987
2001
1988 # Check if we have the entry in cache
2002 # Check if we have the entry in cache
1989 # The cache entry looks like (node, rev, rawtext)
2003 # The cache entry looks like (node, rev, rawtext)
1990 if self._revisioncache:
2004 if self._revisioncache:
1991 if self._revisioncache[0] == node:
2005 if self._revisioncache[0] == node:
1992 return (rev, self._revisioncache[2], True)
2006 return (rev, self._revisioncache[2], True)
1993 cachedrev = self._revisioncache[1]
2007 cachedrev = self._revisioncache[1]
1994
2008
1995 if rev is None:
2009 if rev is None:
1996 rev = self.rev(node)
2010 rev = self.rev(node)
1997
2011
1998 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
2012 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1999 if stopped:
2013 if stopped:
2000 basetext = self._revisioncache[2]
2014 basetext = self._revisioncache[2]
2001
2015
2002 # drop cache to save memory, the caller is expected to
2016 # drop cache to save memory, the caller is expected to
2003 # update self._revisioncache after validating the text
2017 # update self._revisioncache after validating the text
2004 self._revisioncache = None
2018 self._revisioncache = None
2005
2019
2006 targetsize = None
2020 targetsize = None
2007 rawsize = self.index[rev][2]
2021 rawsize = self.index[rev][2]
2008 if 0 <= rawsize:
2022 if 0 <= rawsize:
2009 targetsize = 4 * rawsize
2023 targetsize = 4 * rawsize
2010
2024
2011 bins = self._chunks(chain, df=_df, targetsize=targetsize)
2025 bins = self._chunks(chain, df=_df, targetsize=targetsize)
2012 if basetext is None:
2026 if basetext is None:
2013 basetext = bytes(bins[0])
2027 basetext = bytes(bins[0])
2014 bins = bins[1:]
2028 bins = bins[1:]
2015
2029
2016 rawtext = mdiff.patches(basetext, bins)
2030 rawtext = mdiff.patches(basetext, bins)
2017 del basetext # let us have a chance to free memory early
2031 del basetext # let us have a chance to free memory early
2018 return (rev, rawtext, False)
2032 return (rev, rawtext, False)
2019
2033
2020 def _sidedata(self, rev):
2034 def _sidedata(self, rev):
2021 """Return the sidedata for a given revision number."""
2035 """Return the sidedata for a given revision number."""
2022 index_entry = self.index[rev]
2036 index_entry = self.index[rev]
2023 sidedata_offset = index_entry[8]
2037 sidedata_offset = index_entry[8]
2024 sidedata_size = index_entry[9]
2038 sidedata_size = index_entry[9]
2025
2039
2026 if self._inline:
2040 if self._inline:
2027 sidedata_offset += self.index.entry_size * (1 + rev)
2041 sidedata_offset += self.index.entry_size * (1 + rev)
2028 if sidedata_size == 0:
2042 if sidedata_size == 0:
2029 return {}
2043 return {}
2030
2044
2031 segment = self._getsegment(sidedata_offset, sidedata_size)
2045 segment = self._getsegment(sidedata_offset, sidedata_size)
2032 sidedata = sidedatautil.deserialize_sidedata(segment)
2046 sidedata = sidedatautil.deserialize_sidedata(segment)
2033 return sidedata
2047 return sidedata
2034
2048
2035 def rawdata(self, nodeorrev, _df=None):
2049 def rawdata(self, nodeorrev, _df=None):
2036 """return an uncompressed raw data of a given node or revision number.
2050 """return an uncompressed raw data of a given node or revision number.
2037
2051
2038 _df - an existing file handle to read from. (internal-only)
2052 _df - an existing file handle to read from. (internal-only)
2039 """
2053 """
2040 return self._revisiondata(nodeorrev, _df, raw=True)[0]
2054 return self._revisiondata(nodeorrev, _df, raw=True)[0]
2041
2055
2042 def hash(self, text, p1, p2):
2056 def hash(self, text, p1, p2):
2043 """Compute a node hash.
2057 """Compute a node hash.
2044
2058
2045 Available as a function so that subclasses can replace the hash
2059 Available as a function so that subclasses can replace the hash
2046 as needed.
2060 as needed.
2047 """
2061 """
2048 return storageutil.hashrevisionsha1(text, p1, p2)
2062 return storageutil.hashrevisionsha1(text, p1, p2)
2049
2063
2050 def checkhash(self, text, node, p1=None, p2=None, rev=None):
2064 def checkhash(self, text, node, p1=None, p2=None, rev=None):
2051 """Check node hash integrity.
2065 """Check node hash integrity.
2052
2066
2053 Available as a function so that subclasses can extend hash mismatch
2067 Available as a function so that subclasses can extend hash mismatch
2054 behaviors as needed.
2068 behaviors as needed.
2055 """
2069 """
2056 try:
2070 try:
2057 if p1 is None and p2 is None:
2071 if p1 is None and p2 is None:
2058 p1, p2 = self.parents(node)
2072 p1, p2 = self.parents(node)
2059 if node != self.hash(text, p1, p2):
2073 if node != self.hash(text, p1, p2):
2060 # Clear the revision cache on hash failure. The revision cache
2074 # Clear the revision cache on hash failure. The revision cache
2061 # only stores the raw revision and clearing the cache does have
2075 # only stores the raw revision and clearing the cache does have
2062 # the side-effect that we won't have a cache hit when the raw
2076 # the side-effect that we won't have a cache hit when the raw
2063 # revision data is accessed. But this case should be rare and
2077 # revision data is accessed. But this case should be rare and
2064 # it is extra work to teach the cache about the hash
2078 # it is extra work to teach the cache about the hash
2065 # verification state.
2079 # verification state.
2066 if self._revisioncache and self._revisioncache[0] == node:
2080 if self._revisioncache and self._revisioncache[0] == node:
2067 self._revisioncache = None
2081 self._revisioncache = None
2068
2082
2069 revornode = rev
2083 revornode = rev
2070 if revornode is None:
2084 if revornode is None:
2071 revornode = templatefilters.short(hex(node))
2085 revornode = templatefilters.short(hex(node))
2072 raise error.RevlogError(
2086 raise error.RevlogError(
2073 _(b"integrity check failed on %s:%s")
2087 _(b"integrity check failed on %s:%s")
2074 % (self.display_id, pycompat.bytestr(revornode))
2088 % (self.display_id, pycompat.bytestr(revornode))
2075 )
2089 )
2076 except error.RevlogError:
2090 except error.RevlogError:
2077 if self._censorable and storageutil.iscensoredtext(text):
2091 if self._censorable and storageutil.iscensoredtext(text):
2078 raise error.CensoredNodeError(self.display_id, node, text)
2092 raise error.CensoredNodeError(self.display_id, node, text)
2079 raise
2093 raise
2080
2094
2081 def _enforceinlinesize(self, tr):
2095 def _enforceinlinesize(self, tr):
2082 """Check if the revlog is too big for inline and convert if so.
2096 """Check if the revlog is too big for inline and convert if so.
2083
2097
2084 This should be called after revisions are added to the revlog. If the
2098 This should be called after revisions are added to the revlog. If the
2085 revlog has grown too large to be an inline revlog, it will convert it
2099 revlog has grown too large to be an inline revlog, it will convert it
2086 to use multiple index and data files.
2100 to use multiple index and data files.
2087 """
2101 """
2088 tiprev = len(self) - 1
2102 tiprev = len(self) - 1
2089 total_size = self.start(tiprev) + self.length(tiprev)
2103 total_size = self.start(tiprev) + self.length(tiprev)
2090 if not self._inline or total_size < _maxinline:
2104 if not self._inline or total_size < _maxinline:
2091 return
2105 return
2092
2106
2093 troffset = tr.findoffset(self._indexfile)
2107 troffset = tr.findoffset(self._indexfile)
2094 if troffset is None:
2108 if troffset is None:
2095 raise error.RevlogError(
2109 raise error.RevlogError(
2096 _(b"%s not found in the transaction") % self._indexfile
2110 _(b"%s not found in the transaction") % self._indexfile
2097 )
2111 )
2098 trindex = 0
2112 trindex = 0
2099 tr.add(self._datafile, 0)
2113 tr.add(self._datafile, 0)
2100
2114
2101 existing_handles = False
2115 existing_handles = False
2102 if self._writinghandles is not None:
2116 if self._writinghandles is not None:
2103 existing_handles = True
2117 existing_handles = True
2104 fp = self._writinghandles[0]
2118 fp = self._writinghandles[0]
2105 fp.flush()
2119 fp.flush()
2106 fp.close()
2120 fp.close()
2107 # We can't use the cached file handle after close(). So prevent
2121 # We can't use the cached file handle after close(). So prevent
2108 # its usage.
2122 # its usage.
2109 self._writinghandles = None
2123 self._writinghandles = None
2110
2124
2111 new_dfh = self._datafp(b'w+')
2125 new_dfh = self._datafp(b'w+')
2112 new_dfh.truncate(0) # drop any potentially existing data
2126 new_dfh.truncate(0) # drop any potentially existing data
2113 try:
2127 try:
2114 with self._indexfp() as read_ifh:
2128 with self._indexfp() as read_ifh:
2115 for r in self:
2129 for r in self:
2116 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2130 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2117 if troffset <= self.start(r):
2131 if troffset <= self.start(r):
2118 trindex = r
2132 trindex = r
2119 new_dfh.flush()
2133 new_dfh.flush()
2120
2134
2121 with self.__index_new_fp() as fp:
2135 with self.__index_new_fp() as fp:
2122 self._format_flags &= ~FLAG_INLINE_DATA
2136 self._format_flags &= ~FLAG_INLINE_DATA
2123 self._inline = False
2137 self._inline = False
2124 for i in self:
2138 for i in self:
2125 e = self.index.entry_binary(i)
2139 e = self.index.entry_binary(i)
2126 if i == 0 and self._docket is None:
2140 if i == 0 and self._docket is None:
2127 header = self._format_flags | self._format_version
2141 header = self._format_flags | self._format_version
2128 header = self.index.pack_header(header)
2142 header = self.index.pack_header(header)
2129 e = header + e
2143 e = header + e
2130 fp.write(e)
2144 fp.write(e)
2131 if self._docket is not None:
2145 if self._docket is not None:
2132 self._docket.index_end = fp.tell()
2146 self._docket.index_end = fp.tell()
2133 # the temp file replace the real index when we exit the context
2147 # the temp file replace the real index when we exit the context
2134 # manager
2148 # manager
2135
2149
2136 tr.replace(self._indexfile, trindex * self.index.entry_size)
2150 tr.replace(self._indexfile, trindex * self.index.entry_size)
2137 nodemaputil.setup_persistent_nodemap(tr, self)
2151 nodemaputil.setup_persistent_nodemap(tr, self)
2138 self._chunkclear()
2152 self._chunkclear()
2139
2153
2140 if existing_handles:
2154 if existing_handles:
2141 # switched from inline to conventional reopen the index
2155 # switched from inline to conventional reopen the index
2142 ifh = self.__index_write_fp()
2156 ifh = self.__index_write_fp()
2143 self._writinghandles = (ifh, new_dfh)
2157 self._writinghandles = (ifh, new_dfh)
2144 new_dfh = None
2158 new_dfh = None
2145 finally:
2159 finally:
2146 if new_dfh is not None:
2160 if new_dfh is not None:
2147 new_dfh.close()
2161 new_dfh.close()
2148
2162
2149 def _nodeduplicatecallback(self, transaction, node):
2163 def _nodeduplicatecallback(self, transaction, node):
2150 """called when trying to add a node already stored."""
2164 """called when trying to add a node already stored."""
2151
2165
2152 @contextlib.contextmanager
2166 @contextlib.contextmanager
2153 def _writing(self, transaction):
2167 def _writing(self, transaction):
2154 if self._trypending:
2168 if self._trypending:
2155 msg = b'try to write in a `trypending` revlog: %s'
2169 msg = b'try to write in a `trypending` revlog: %s'
2156 msg %= self.display_id
2170 msg %= self.display_id
2157 raise error.ProgrammingError(msg)
2171 raise error.ProgrammingError(msg)
2158 if self._writinghandles is not None:
2172 if self._writinghandles is not None:
2159 yield
2173 yield
2160 else:
2174 else:
2161 r = len(self)
2175 r = len(self)
2162 dsize = 0
2176 dsize = 0
2163 if r:
2177 if r:
2164 dsize = self.end(r - 1)
2178 dsize = self.end(r - 1)
2165 dfh = None
2179 dfh = None
2166 if not self._inline:
2180 if not self._inline:
2167 try:
2181 try:
2168 dfh = self._datafp(b"r+")
2182 dfh = self._datafp(b"r+")
2169 if self._docket is None:
2183 if self._docket is None:
2170 dfh.seek(0, os.SEEK_END)
2184 dfh.seek(0, os.SEEK_END)
2171 else:
2185 else:
2172 dfh.seek(self._docket.data_end, os.SEEK_SET)
2186 dfh.seek(self._docket.data_end, os.SEEK_SET)
2173 except IOError as inst:
2187 except IOError as inst:
2174 if inst.errno != errno.ENOENT:
2188 if inst.errno != errno.ENOENT:
2175 raise
2189 raise
2176 dfh = self._datafp(b"w+")
2190 dfh = self._datafp(b"w+")
2177 transaction.add(self._datafile, dsize)
2191 transaction.add(self._datafile, dsize)
2178 try:
2192 try:
2179 isize = r * self.index.entry_size
2193 isize = r * self.index.entry_size
2180 ifh = self.__index_write_fp()
2194 ifh = self.__index_write_fp()
2181 if self._inline:
2195 if self._inline:
2182 transaction.add(self._indexfile, dsize + isize)
2196 transaction.add(self._indexfile, dsize + isize)
2183 else:
2197 else:
2184 transaction.add(self._indexfile, isize)
2198 transaction.add(self._indexfile, isize)
2185 try:
2199 try:
2186 self._writinghandles = (ifh, dfh)
2200 self._writinghandles = (ifh, dfh)
2187 try:
2201 try:
2188 yield
2202 yield
2189 if self._docket is not None:
2203 if self._docket is not None:
2190 self._write_docket(transaction)
2204 self._write_docket(transaction)
2191 finally:
2205 finally:
2192 self._writinghandles = None
2206 self._writinghandles = None
2193 finally:
2207 finally:
2194 ifh.close()
2208 ifh.close()
2195 finally:
2209 finally:
2196 if dfh is not None:
2210 if dfh is not None:
2197 dfh.close()
2211 dfh.close()
2198
2212
2199 def _write_docket(self, transaction):
2213 def _write_docket(self, transaction):
2200 """write the current docket on disk
2214 """write the current docket on disk
2201
2215
2202 Exist as a method to help changelog to implement transaction logic
2216 Exist as a method to help changelog to implement transaction logic
2203
2217
2204 We could also imagine using the same transaction logic for all revlog
2218 We could also imagine using the same transaction logic for all revlog
2205 since docket are cheap."""
2219 since docket are cheap."""
2206 self._docket.write(transaction)
2220 self._docket.write(transaction)
2207
2221
2208 def addrevision(
2222 def addrevision(
2209 self,
2223 self,
2210 text,
2224 text,
2211 transaction,
2225 transaction,
2212 link,
2226 link,
2213 p1,
2227 p1,
2214 p2,
2228 p2,
2215 cachedelta=None,
2229 cachedelta=None,
2216 node=None,
2230 node=None,
2217 flags=REVIDX_DEFAULT_FLAGS,
2231 flags=REVIDX_DEFAULT_FLAGS,
2218 deltacomputer=None,
2232 deltacomputer=None,
2219 sidedata=None,
2233 sidedata=None,
2220 ):
2234 ):
2221 """add a revision to the log
2235 """add a revision to the log
2222
2236
2223 text - the revision data to add
2237 text - the revision data to add
2224 transaction - the transaction object used for rollback
2238 transaction - the transaction object used for rollback
2225 link - the linkrev data to add
2239 link - the linkrev data to add
2226 p1, p2 - the parent nodeids of the revision
2240 p1, p2 - the parent nodeids of the revision
2227 cachedelta - an optional precomputed delta
2241 cachedelta - an optional precomputed delta
2228 node - nodeid of revision; typically node is not specified, and it is
2242 node - nodeid of revision; typically node is not specified, and it is
2229 computed by default as hash(text, p1, p2), however subclasses might
2243 computed by default as hash(text, p1, p2), however subclasses might
2230 use different hashing method (and override checkhash() in such case)
2244 use different hashing method (and override checkhash() in such case)
2231 flags - the known flags to set on the revision
2245 flags - the known flags to set on the revision
2232 deltacomputer - an optional deltacomputer instance shared between
2246 deltacomputer - an optional deltacomputer instance shared between
2233 multiple calls
2247 multiple calls
2234 """
2248 """
2235 if link == nullrev:
2249 if link == nullrev:
2236 raise error.RevlogError(
2250 raise error.RevlogError(
2237 _(b"attempted to add linkrev -1 to %s") % self.display_id
2251 _(b"attempted to add linkrev -1 to %s") % self.display_id
2238 )
2252 )
2239
2253
2240 if sidedata is None:
2254 if sidedata is None:
2241 sidedata = {}
2255 sidedata = {}
2242 elif sidedata and not self.hassidedata:
2256 elif sidedata and not self.hassidedata:
2243 raise error.ProgrammingError(
2257 raise error.ProgrammingError(
2244 _(b"trying to add sidedata to a revlog who don't support them")
2258 _(b"trying to add sidedata to a revlog who don't support them")
2245 )
2259 )
2246
2260
2247 if flags:
2261 if flags:
2248 node = node or self.hash(text, p1, p2)
2262 node = node or self.hash(text, p1, p2)
2249
2263
2250 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2264 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2251
2265
2252 # If the flag processor modifies the revision data, ignore any provided
2266 # If the flag processor modifies the revision data, ignore any provided
2253 # cachedelta.
2267 # cachedelta.
2254 if rawtext != text:
2268 if rawtext != text:
2255 cachedelta = None
2269 cachedelta = None
2256
2270
2257 if len(rawtext) > _maxentrysize:
2271 if len(rawtext) > _maxentrysize:
2258 raise error.RevlogError(
2272 raise error.RevlogError(
2259 _(
2273 _(
2260 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2274 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2261 )
2275 )
2262 % (self.display_id, len(rawtext))
2276 % (self.display_id, len(rawtext))
2263 )
2277 )
2264
2278
2265 node = node or self.hash(rawtext, p1, p2)
2279 node = node or self.hash(rawtext, p1, p2)
2266 rev = self.index.get_rev(node)
2280 rev = self.index.get_rev(node)
2267 if rev is not None:
2281 if rev is not None:
2268 return rev
2282 return rev
2269
2283
2270 if validatehash:
2284 if validatehash:
2271 self.checkhash(rawtext, node, p1=p1, p2=p2)
2285 self.checkhash(rawtext, node, p1=p1, p2=p2)
2272
2286
2273 return self.addrawrevision(
2287 return self.addrawrevision(
2274 rawtext,
2288 rawtext,
2275 transaction,
2289 transaction,
2276 link,
2290 link,
2277 p1,
2291 p1,
2278 p2,
2292 p2,
2279 node,
2293 node,
2280 flags,
2294 flags,
2281 cachedelta=cachedelta,
2295 cachedelta=cachedelta,
2282 deltacomputer=deltacomputer,
2296 deltacomputer=deltacomputer,
2283 sidedata=sidedata,
2297 sidedata=sidedata,
2284 )
2298 )
2285
2299
2286 def addrawrevision(
2300 def addrawrevision(
2287 self,
2301 self,
2288 rawtext,
2302 rawtext,
2289 transaction,
2303 transaction,
2290 link,
2304 link,
2291 p1,
2305 p1,
2292 p2,
2306 p2,
2293 node,
2307 node,
2294 flags,
2308 flags,
2295 cachedelta=None,
2309 cachedelta=None,
2296 deltacomputer=None,
2310 deltacomputer=None,
2297 sidedata=None,
2311 sidedata=None,
2298 ):
2312 ):
2299 """add a raw revision with known flags, node and parents
2313 """add a raw revision with known flags, node and parents
2300 useful when reusing a revision not stored in this revlog (ex: received
2314 useful when reusing a revision not stored in this revlog (ex: received
2301 over wire, or read from an external bundle).
2315 over wire, or read from an external bundle).
2302 """
2316 """
2303 with self._writing(transaction):
2317 with self._writing(transaction):
2304 return self._addrevision(
2318 return self._addrevision(
2305 node,
2319 node,
2306 rawtext,
2320 rawtext,
2307 transaction,
2321 transaction,
2308 link,
2322 link,
2309 p1,
2323 p1,
2310 p2,
2324 p2,
2311 flags,
2325 flags,
2312 cachedelta,
2326 cachedelta,
2313 deltacomputer=deltacomputer,
2327 deltacomputer=deltacomputer,
2314 sidedata=sidedata,
2328 sidedata=sidedata,
2315 )
2329 )
2316
2330
2317 def compress(self, data):
2331 def compress(self, data):
2318 """Generate a possibly-compressed representation of data."""
2332 """Generate a possibly-compressed representation of data."""
2319 if not data:
2333 if not data:
2320 return b'', data
2334 return b'', data
2321
2335
2322 compressed = self._compressor.compress(data)
2336 compressed = self._compressor.compress(data)
2323
2337
2324 if compressed:
2338 if compressed:
2325 # The revlog compressor added the header in the returned data.
2339 # The revlog compressor added the header in the returned data.
2326 return b'', compressed
2340 return b'', compressed
2327
2341
2328 if data[0:1] == b'\0':
2342 if data[0:1] == b'\0':
2329 return b'', data
2343 return b'', data
2330 return b'u', data
2344 return b'u', data
2331
2345
2332 def decompress(self, data):
2346 def decompress(self, data):
2333 """Decompress a revlog chunk.
2347 """Decompress a revlog chunk.
2334
2348
2335 The chunk is expected to begin with a header identifying the
2349 The chunk is expected to begin with a header identifying the
2336 format type so it can be routed to an appropriate decompressor.
2350 format type so it can be routed to an appropriate decompressor.
2337 """
2351 """
2338 if not data:
2352 if not data:
2339 return data
2353 return data
2340
2354
2341 # Revlogs are read much more frequently than they are written and many
2355 # Revlogs are read much more frequently than they are written and many
2342 # chunks only take microseconds to decompress, so performance is
2356 # chunks only take microseconds to decompress, so performance is
2343 # important here.
2357 # important here.
2344 #
2358 #
2345 # We can make a few assumptions about revlogs:
2359 # We can make a few assumptions about revlogs:
2346 #
2360 #
2347 # 1) the majority of chunks will be compressed (as opposed to inline
2361 # 1) the majority of chunks will be compressed (as opposed to inline
2348 # raw data).
2362 # raw data).
2349 # 2) decompressing *any* data will likely by at least 10x slower than
2363 # 2) decompressing *any* data will likely by at least 10x slower than
2350 # returning raw inline data.
2364 # returning raw inline data.
2351 # 3) we want to prioritize common and officially supported compression
2365 # 3) we want to prioritize common and officially supported compression
2352 # engines
2366 # engines
2353 #
2367 #
2354 # It follows that we want to optimize for "decompress compressed data
2368 # It follows that we want to optimize for "decompress compressed data
2355 # when encoded with common and officially supported compression engines"
2369 # when encoded with common and officially supported compression engines"
2356 # case over "raw data" and "data encoded by less common or non-official
2370 # case over "raw data" and "data encoded by less common or non-official
2357 # compression engines." That is why we have the inline lookup first
2371 # compression engines." That is why we have the inline lookup first
2358 # followed by the compengines lookup.
2372 # followed by the compengines lookup.
2359 #
2373 #
2360 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2374 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2361 # compressed chunks. And this matters for changelog and manifest reads.
2375 # compressed chunks. And this matters for changelog and manifest reads.
2362 t = data[0:1]
2376 t = data[0:1]
2363
2377
2364 if t == b'x':
2378 if t == b'x':
2365 try:
2379 try:
2366 return _zlibdecompress(data)
2380 return _zlibdecompress(data)
2367 except zlib.error as e:
2381 except zlib.error as e:
2368 raise error.RevlogError(
2382 raise error.RevlogError(
2369 _(b'revlog decompress error: %s')
2383 _(b'revlog decompress error: %s')
2370 % stringutil.forcebytestr(e)
2384 % stringutil.forcebytestr(e)
2371 )
2385 )
2372 # '\0' is more common than 'u' so it goes first.
2386 # '\0' is more common than 'u' so it goes first.
2373 elif t == b'\0':
2387 elif t == b'\0':
2374 return data
2388 return data
2375 elif t == b'u':
2389 elif t == b'u':
2376 return util.buffer(data, 1)
2390 return util.buffer(data, 1)
2377
2391
2378 try:
2392 compressor = self._get_decompressor(t)
2379 compressor = self._decompressors[t]
2380 except KeyError:
2381 try:
2382 engine = util.compengines.forrevlogheader(t)
2383 compressor = engine.revlogcompressor(self._compengineopts)
2384 self._decompressors[t] = compressor
2385 except KeyError:
2386 raise error.RevlogError(
2387 _(b'unknown compression type %s') % binascii.hexlify(t)
2388 )
2389
2393
2390 return compressor.decompress(data)
2394 return compressor.decompress(data)
2391
2395
def _addrevision(
    self,
    node,
    rawtext,
    transaction,
    link,
    p1,
    p2,
    flags,
    cachedelta,
    alwayscache=False,
    deltacomputer=None,
    sidedata=None,
):
    """internal function to add revisions to the log

    see addrevision for argument descriptions.

    note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

    if "deltacomputer" is not provided or None, a defaultdeltacomputer will
    be used.

    invariants:
    - rawtext is optional (can be None); if not set, cachedelta must be set.
      if both are set, they must correspond to each other.

    Returns the revision number of the newly added revision.
    """
    # the null and working-directory nodes are virtual and must never be
    # stored in a revlog
    if node == self.nullid:
        raise error.RevlogError(
            _(b"%s: attempt to add null revision") % self.display_id
        )
    if (
        node == self.nodeconstants.wdirid
        or node in self.nodeconstants.wdirfilenodeids
    ):
        raise error.RevlogError(
            _(b"%s: attempt to add wdir revision") % self.display_id
        )
    # writes are only legal inside the `revlog._writing` context, which
    # sets up the (index, data) file handle pair
    if self._writinghandles is None:
        msg = b'adding revision outside `revlog._writing` context'
        raise error.ProgrammingError(msg)

    # inline revlogs interleave revision data into the index file, so the
    # delta computer must read from the index handle in that case
    if self._inline:
        fh = self._writinghandles[0]
    else:
        fh = self._writinghandles[1]

    btext = [rawtext]

    curr = len(self)
    prev = curr - 1

    offset = self._get_data_offset(prev)

    if self._concurrencychecker:
        ifh, dfh = self._writinghandles
        if self._inline:
            # offset is "as if" it were in the .d file, so we need to add on
            # the size of the entry metadata.
            self._concurrencychecker(
                ifh, self._indexfile, offset + curr * self.index.entry_size
            )
        else:
            # Entries in the .i are a consistent size.
            self._concurrencychecker(
                ifh, self._indexfile, curr * self.index.entry_size
            )
            self._concurrencychecker(dfh, self._datafile, offset)

    p1r, p2r = self.rev(p1), self.rev(p2)

    # full versions are inserted when the needed deltas
    # become comparable to the uncompressed text
    if rawtext is None:
        # need rawtext size, before changed by flag processors, which is
        # the non-raw size. use revlog explicitly to avoid filelog's extra
        # logic that might remove metadata size.
        textlen = mdiff.patchedsize(
            revlog.size(self, cachedelta[0]), cachedelta[1]
        )
    else:
        textlen = len(rawtext)

    if deltacomputer is None:
        deltacomputer = deltautil.deltacomputer(self)

    revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)

    deltainfo = deltacomputer.finddeltainfo(revinfo, fh)

    # only docket-based revlogs (v2) can record a per-entry compression
    # mode; older formats always use the inline compression header
    compression_mode = COMP_MODE_INLINE
    if self._docket is not None:
        h, d = deltainfo.data
        if not h and not d:
            # not data to store at all... declare them uncompressed
            compression_mode = COMP_MODE_PLAIN
        elif not h and d[0:1] == b'\0':
            # a leading NUL byte marks already-plain chunk data
            compression_mode = COMP_MODE_PLAIN
        elif h == b'u':
            # we have a more efficient way to declare uncompressed
            h = b''
            compression_mode = COMP_MODE_PLAIN
            deltainfo = deltautil.drop_u_compression(deltainfo)

    if sidedata and self.hassidedata:
        serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
        sidedata_offset = offset + deltainfo.deltalen
    else:
        serialized_sidedata = b""
        # Don't store the offset if the sidedata is empty, that way
        # we can easily detect empty sidedata and they will be no different
        # than ones we manually add.
        sidedata_offset = 0

    # assemble the in-memory index entry for the new revision
    e = (
        offset_type(offset, flags),
        deltainfo.deltalen,
        textlen,
        deltainfo.base,
        link,
        p1r,
        p2r,
        node,
        sidedata_offset,
        len(serialized_sidedata),
        compression_mode,
    )

    self.index.append(e)
    entry = self.index.entry_binary(curr)
    if curr == 0 and self._docket is None:
        # docket-less revlogs embed the format header in the very first
        # index entry
        header = self._format_flags | self._format_version
        header = self.index.pack_header(header)
        entry = header + entry
    self._writeentry(
        transaction,
        entry,
        deltainfo.data,
        link,
        offset,
        serialized_sidedata,
    )

    # the delta computer may have resolved the full text as a side effect
    rawtext = btext[0]

    if alwayscache and rawtext is None:
        rawtext = deltacomputer.buildtext(revinfo, fh)

    if type(rawtext) == bytes:  # only accept immutable objects
        self._revisioncache = (node, curr, rawtext)
    self._chainbasecache[curr] = deltainfo.chainbase
    return curr
2545 def _get_data_offset(self, prev):
2549 def _get_data_offset(self, prev):
2546 """Returns the current offset in the (in-transaction) data file.
2550 """Returns the current offset in the (in-transaction) data file.
2547 Versions < 2 of the revlog can get this 0(1), revlog v2 needs a docket
2551 Versions < 2 of the revlog can get this 0(1), revlog v2 needs a docket
2548 file to store that information: since sidedata can be rewritten to the
2552 file to store that information: since sidedata can be rewritten to the
2549 end of the data file within a transaction, you can have cases where, for
2553 end of the data file within a transaction, you can have cases where, for
2550 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2554 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2551 to `n - 1`'s sidedata being written after `n`'s data.
2555 to `n - 1`'s sidedata being written after `n`'s data.
2552
2556
2553 TODO cache this in a docket file before getting out of experimental."""
2557 TODO cache this in a docket file before getting out of experimental."""
2554 if self._docket is None:
2558 if self._docket is None:
2555 return self.end(prev)
2559 return self.end(prev)
2556 else:
2560 else:
2557 return self._docket.data_end
2561 return self._docket.data_end
2558
2562
def _writeentry(self, transaction, entry, data, link, offset, sidedata):
    """write one binary index entry and its associated revision data

    ``entry`` is the serialized index entry, ``data`` is a
    ``(header, compressed-body)`` pair, and ``sidedata`` the serialized
    sidedata blob (possibly empty).
    """
    # Files opened in a+ mode have inconsistent behavior on various
    # platforms. Windows requires that a file positioning call be made
    # when the file handle transitions between reads and writes. See
    # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
    # platforms, Python or the platform itself can be buggy. Some versions
    # of Solaris have been observed to not append at the end of the file
    # if the file was seeked to before the end. See issue4943 for more.
    #
    # We work around this issue by inserting a seek() before writing.
    # Note: This is likely not necessary on Python 3. However, because
    # the file handle is reused for reads and may be seeked there, we need
    # to be careful before changing this.
    if self._writinghandles is None:
        msg = b'adding revision outside `revlog._writing` context'
        raise error.ProgrammingError(msg)
    ifh, dfh = self._writinghandles
    # when a docket exists it is the authoritative record of the current
    # file ends; otherwise simply append at EOF
    if self._docket is None:
        ifh.seek(0, os.SEEK_END)
    else:
        ifh.seek(self._docket.index_end, os.SEEK_SET)
    if dfh:
        if self._docket is None:
            dfh.seek(0, os.SEEK_END)
        else:
            dfh.seek(self._docket.data_end, os.SEEK_SET)

    curr = len(self) - 1
    if not self._inline:
        # separate data file: register both files with the transaction,
        # write the revision data (and sidedata) then the index entry
        transaction.add(self._datafile, offset)
        transaction.add(self._indexfile, curr * len(entry))
        if data[0]:
            dfh.write(data[0])
        dfh.write(data[1])
        if sidedata:
            dfh.write(sidedata)
        ifh.write(entry)
    else:
        # inline revlog: entry and revision data interleave in the index
        offset += curr * self.index.entry_size
        transaction.add(self._indexfile, offset)
        ifh.write(entry)
        ifh.write(data[0])
        ifh.write(data[1])
        if sidedata:
            ifh.write(sidedata)
        # may split the revlog out of inline mode once it grows too large
        self._enforceinlinesize(transaction)
    if self._docket is not None:
        # record the new file ends in the docket
        self._docket.index_end = self._writinghandles[0].tell()
        self._docket.data_end = self._writinghandles[1].tell()

    nodemaputil.setup_persistent_nodemap(transaction, self)
def addgroup(
    self,
    deltas,
    linkmapper,
    transaction,
    alwayscache=False,
    addrevisioncb=None,
    duplicaterevisioncb=None,
):
    """
    add a delta group

    given a set of deltas, add them to the revision log. the
    first delta is against its parent, which should be in our
    log, the rest are against the previous delta.

    If ``addrevisioncb`` is defined, it will be called with arguments of
    this revlog and the node that was added.

    Returns a boolean indicating whether at least one revision (new or
    already-known duplicate) was processed.
    """

    # guard against re-entrant calls: addgroup() temporarily flips
    # _adding_group and nesting would corrupt that state
    if self._adding_group:
        raise error.ProgrammingError(b'cannot nest addgroup() calls')

    self._adding_group = True
    empty = True
    try:
        with self._writing(transaction):
            deltacomputer = deltautil.deltacomputer(self)
            # loop through our set of deltas
            for data in deltas:
                (
                    node,
                    p1,
                    p2,
                    linknode,
                    deltabase,
                    delta,
                    flags,
                    sidedata,
                ) = data
                link = linkmapper(linknode)
                flags = flags or REVIDX_DEFAULT_FLAGS

                rev = self.index.get_rev(node)
                if rev is not None:
                    # this can happen if two branches make the same change
                    self._nodeduplicatecallback(transaction, rev)
                    if duplicaterevisioncb:
                        duplicaterevisioncb(self, rev)
                    empty = False
                    continue

                # both parents must already be known to this revlog
                for p in (p1, p2):
                    if not self.index.has_node(p):
                        raise error.LookupError(
                            p, self.radix, _(b'unknown parent')
                        )

                if not self.index.has_node(deltabase):
                    raise error.LookupError(
                        deltabase, self.display_id, _(b'unknown delta base')
                    )

                baserev = self.rev(deltabase)

                if baserev != nullrev and self.iscensored(baserev):
                    # if base is censored, delta must be full replacement in a
                    # single patch operation
                    hlen = struct.calcsize(b">lll")
                    oldlen = self.rawsize(baserev)
                    newlen = len(delta) - hlen
                    if delta[:hlen] != mdiff.replacediffheader(
                        oldlen, newlen
                    ):
                        raise error.CensoredBaseError(
                            self.display_id, self.node(baserev)
                        )

                if not flags and self._peek_iscensored(baserev, delta):
                    flags |= REVIDX_ISCENSORED

                # We assume consumers of addrevisioncb will want to retrieve
                # the added revision, which will require a call to
                # revision(). revision() will fast path if there is a cache
                # hit. So, we tell _addrevision() to always cache in this case.
                # We're only using addgroup() in the context of changegroup
                # generation so the revision data can always be handled as raw
                # by the flagprocessor.
                rev = self._addrevision(
                    node,
                    None,
                    transaction,
                    link,
                    p1,
                    p2,
                    flags,
                    (baserev, delta),
                    alwayscache=alwayscache,
                    deltacomputer=deltacomputer,
                    sidedata=sidedata,
                )

                if addrevisioncb:
                    addrevisioncb(self, rev)
                empty = False
    finally:
        self._adding_group = False
    return not empty
def iscensored(self, rev):
    """Return whether file revision ``rev`` is censored.

    Revlogs that can never hold censored data answer without consulting
    the per-revision index flags.
    """
    return bool(self._censorable) and self.flags(rev) & REVIDX_ISCENSORED
2727 def _peek_iscensored(self, baserev, delta):
2731 def _peek_iscensored(self, baserev, delta):
2728 """Quickly check if a delta produces a censored revision."""
2732 """Quickly check if a delta produces a censored revision."""
2729 if not self._censorable:
2733 if not self._censorable:
2730 return False
2734 return False
2731
2735
2732 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2736 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2733
2737
def getstrippoint(self, minlink):
    """find the minimum rev that must be stripped to strip the linkrev

    Returns a ``(minrev, brokenrevs)`` tuple: the first revision to
    truncate at, plus the set of all revisions whose linkrevs will be
    broken by this strip.
    """
    tiprev = len(self) - 1
    heads = self.headrevs()
    return storageutil.resolvestripinfo(
        minlink,
        tiprev,
        heads,
        self.linkrev,
        self.parentrevs,
    )
def strip(self, minlink, transaction):
    """truncate the revlog on the first revision with a linkrev >= minlink

    This function is called when we're stripping revision minlink and
    its descendants from the repository.

    We have to remove all revisions with linkrev >= minlink, because
    the equivalent changelog revisions will be renumbered after the
    strip.

    So we truncate the revlog on the first of these revisions, and
    trust that the caller has saved the revisions that shouldn't be
    removed and that it'll re-add them after this truncation.
    """
    # nothing to strip in an empty revlog
    if len(self) == 0:
        return

    rev, _ = self.getstrippoint(minlink)
    if rev == len(self):
        # strip point is past the tip: nothing to remove
        return

    # first truncate the files on disk
    data_end = self.start(rev)
    if not self._inline:
        transaction.add(self._datafile, data_end)
        end = rev * self.index.entry_size
    else:
        # inline: index and data interleave, truncate at combined offset
        end = data_end + (rev * self.index.entry_size)

    transaction.add(self._indexfile, end)
    if self._docket is not None:
        # XXX we could, leverage the docket while stripping. However it is
        # not powerful enough at the time of this comment
        self._docket.index_end = end
        self._docket.data_end = data_end
        self._docket.write(transaction, stripping=True)

    # then reset internal state in memory to forget those revisions
    self._revisioncache = None
    self._chaininfocache = util.lrucachedict(500)
    self._chunkclear()

    # finally drop the stripped entries from the in-memory index
    del self.index[rev:-1]
def checksize(self):
    """Check size of index and data files

    return a (dd, di) tuple.
    - dd: extra bytes for the "data" file
    - di: extra bytes for the "index" file

    A healthy revlog will return (0, 0).
    """
    # expected end offset of the data, derived from the last revision
    expected = 0
    if len(self):
        expected = max(0, self.end(len(self) - 1))

    try:
        with self._datafp() as f:
            f.seek(0, io.SEEK_END)
            actual = f.tell()
        dd = actual - expected
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        # a missing data file simply means no surplus data bytes
        dd = 0

    try:
        # use a context manager so the index handle is released even if
        # one of the computations below raises; the previous code only
        # closed it on the success path
        with self.opener(self._indexfile) as f:
            f.seek(0, io.SEEK_END)
            actual = f.tell()
        s = self.index.entry_size
        # di is whatever does not fit a whole number of index entries
        i = max(0, actual // s)
        di = actual - (i * s)
        if self._inline:
            # inline revlogs mix data into the index file: subtract the
            # revision payloads to find the true index surplus
            databytes = 0
            for r in self:
                databytes += max(0, self.length(r))
            dd = 0
            di = actual - len(self) * s - databytes
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        di = 0

    return (dd, di)
def files(self):
    """Return the list of on-disk files backing this revlog.

    The index file is always listed; a separate data file only exists
    for non-inline revlogs.
    """
    if self._inline:
        return [self._indexfile]
    return [self._indexfile, self._datafile]
def emitrevisions(
    self,
    nodes,
    nodesorder=None,
    revisiondata=False,
    assumehaveparentrevisions=False,
    deltamode=repository.CG_DELTAMODE_STD,
    sidedata_helpers=None,
):
    """Emit revision deltas for the requested nodes.

    Thin adapter that normalizes ``nodesorder`` and ``deltamode`` for
    this revlog's configuration, then delegates the actual work to
    ``storageutil.emitrevisions``.
    """
    known_orders = (b'nodes', b'storage', b'linear', None)
    if nodesorder not in known_orders:
        msg = b'unhandled value for nodesorder: %s' % nodesorder
        raise error.ProgrammingError(msg)

    # without general delta, fall back to storage order by default
    if nodesorder is None and not self._generaldelta:
        nodesorder = b'storage'

    if (
        deltamode != repository.CG_DELTAMODE_PREV
        and not self._storedeltachains
    ):
        deltamode = repository.CG_DELTAMODE_FULL

    return storageutil.emitrevisions(
        self,
        nodes,
        nodesorder,
        revlogrevisiondelta,
        deltaparentfn=self.deltaparent,
        candeltafn=self.candelta,
        rawsizefn=self.rawsize,
        revdifffn=self.revdiff,
        flagsfn=self.flags,
        deltamode=deltamode,
        revisiondata=revisiondata,
        assumehaveparentrevisions=assumehaveparentrevisions,
        sidedata_helpers=sidedata_helpers,
    )
# Delta-reuse policies accepted by the ``deltareuse`` argument of
# ``clone()``; see that method's docstring for the detailed semantics
# of each policy.
DELTAREUSEALWAYS = b'always'
DELTAREUSESAMEREVS = b'samerevs'
DELTAREUSENEVER = b'never'

DELTAREUSEFULLADD = b'fulladd'

# set of every valid policy, used by ``clone()`` for argument validation
DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2889 def clone(
2893 def clone(
2890 self,
2894 self,
2891 tr,
2895 tr,
2892 destrevlog,
2896 destrevlog,
2893 addrevisioncb=None,
2897 addrevisioncb=None,
2894 deltareuse=DELTAREUSESAMEREVS,
2898 deltareuse=DELTAREUSESAMEREVS,
2895 forcedeltabothparents=None,
2899 forcedeltabothparents=None,
2896 sidedata_helpers=None,
2900 sidedata_helpers=None,
2897 ):
2901 ):
2898 """Copy this revlog to another, possibly with format changes.
2902 """Copy this revlog to another, possibly with format changes.
2899
2903
2900 The destination revlog will contain the same revisions and nodes.
2904 The destination revlog will contain the same revisions and nodes.
2901 However, it may not be bit-for-bit identical due to e.g. delta encoding
2905 However, it may not be bit-for-bit identical due to e.g. delta encoding
2902 differences.
2906 differences.
2903
2907
2904 The ``deltareuse`` argument control how deltas from the existing revlog
2908 The ``deltareuse`` argument control how deltas from the existing revlog
2905 are preserved in the destination revlog. The argument can have the
2909 are preserved in the destination revlog. The argument can have the
2906 following values:
2910 following values:
2907
2911
2908 DELTAREUSEALWAYS
2912 DELTAREUSEALWAYS
2909 Deltas will always be reused (if possible), even if the destination
2913 Deltas will always be reused (if possible), even if the destination
2910 revlog would not select the same revisions for the delta. This is the
2914 revlog would not select the same revisions for the delta. This is the
2911 fastest mode of operation.
2915 fastest mode of operation.
2912 DELTAREUSESAMEREVS
2916 DELTAREUSESAMEREVS
2913 Deltas will be reused if the destination revlog would pick the same
2917 Deltas will be reused if the destination revlog would pick the same
2914 revisions for the delta. This mode strikes a balance between speed
2918 revisions for the delta. This mode strikes a balance between speed
2915 and optimization.
2919 and optimization.
2916 DELTAREUSENEVER
2920 DELTAREUSENEVER
2917 Deltas will never be reused. This is the slowest mode of execution.
2921 Deltas will never be reused. This is the slowest mode of execution.
2918 This mode can be used to recompute deltas (e.g. if the diff/delta
2922 This mode can be used to recompute deltas (e.g. if the diff/delta
2919 algorithm changes).
2923 algorithm changes).
2920 DELTAREUSEFULLADD
2924 DELTAREUSEFULLADD
2921 Revision will be re-added as if their were new content. This is
2925 Revision will be re-added as if their were new content. This is
2922 slower than DELTAREUSEALWAYS but allow more mechanism to kicks in.
2926 slower than DELTAREUSEALWAYS but allow more mechanism to kicks in.
2923 eg: large file detection and handling.
2927 eg: large file detection and handling.
2924
2928
2925 Delta computation can be slow, so the choice of delta reuse policy can
2929 Delta computation can be slow, so the choice of delta reuse policy can
2926 significantly affect run time.
2930 significantly affect run time.
2927
2931
2928 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2932 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2929 two extremes. Deltas will be reused if they are appropriate. But if the
2933 two extremes. Deltas will be reused if they are appropriate. But if the
2930 delta could choose a better revision, it will do so. This means if you
2934 delta could choose a better revision, it will do so. This means if you
2931 are converting a non-generaldelta revlog to a generaldelta revlog,
2935 are converting a non-generaldelta revlog to a generaldelta revlog,
2932 deltas will be recomputed if the delta's parent isn't a parent of the
2936 deltas will be recomputed if the delta's parent isn't a parent of the
2933 revision.
2937 revision.
2934
2938
2935 In addition to the delta policy, the ``forcedeltabothparents``
2939 In addition to the delta policy, the ``forcedeltabothparents``
2936 argument controls whether to force compute deltas against both parents
2940 argument controls whether to force compute deltas against both parents
2937 for merges. By default, the current default is used.
2941 for merges. By default, the current default is used.
2938
2942
2939 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
2943 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
2940 `sidedata_helpers`.
2944 `sidedata_helpers`.
2941 """
2945 """
2942 if deltareuse not in self.DELTAREUSEALL:
2946 if deltareuse not in self.DELTAREUSEALL:
2943 raise ValueError(
2947 raise ValueError(
2944 _(b'value for deltareuse invalid: %s') % deltareuse
2948 _(b'value for deltareuse invalid: %s') % deltareuse
2945 )
2949 )
2946
2950
2947 if len(destrevlog):
2951 if len(destrevlog):
2948 raise ValueError(_(b'destination revlog is not empty'))
2952 raise ValueError(_(b'destination revlog is not empty'))
2949
2953
2950 if getattr(self, 'filteredrevs', None):
2954 if getattr(self, 'filteredrevs', None):
2951 raise ValueError(_(b'source revlog has filtered revisions'))
2955 raise ValueError(_(b'source revlog has filtered revisions'))
2952 if getattr(destrevlog, 'filteredrevs', None):
2956 if getattr(destrevlog, 'filteredrevs', None):
2953 raise ValueError(_(b'destination revlog has filtered revisions'))
2957 raise ValueError(_(b'destination revlog has filtered revisions'))
2954
2958
2955 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
2959 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
2956 # if possible.
2960 # if possible.
2957 oldlazydelta = destrevlog._lazydelta
2961 oldlazydelta = destrevlog._lazydelta
2958 oldlazydeltabase = destrevlog._lazydeltabase
2962 oldlazydeltabase = destrevlog._lazydeltabase
2959 oldamd = destrevlog._deltabothparents
2963 oldamd = destrevlog._deltabothparents
2960
2964
2961 try:
2965 try:
2962 if deltareuse == self.DELTAREUSEALWAYS:
2966 if deltareuse == self.DELTAREUSEALWAYS:
2963 destrevlog._lazydeltabase = True
2967 destrevlog._lazydeltabase = True
2964 destrevlog._lazydelta = True
2968 destrevlog._lazydelta = True
2965 elif deltareuse == self.DELTAREUSESAMEREVS:
2969 elif deltareuse == self.DELTAREUSESAMEREVS:
2966 destrevlog._lazydeltabase = False
2970 destrevlog._lazydeltabase = False
2967 destrevlog._lazydelta = True
2971 destrevlog._lazydelta = True
2968 elif deltareuse == self.DELTAREUSENEVER:
2972 elif deltareuse == self.DELTAREUSENEVER:
2969 destrevlog._lazydeltabase = False
2973 destrevlog._lazydeltabase = False
2970 destrevlog._lazydelta = False
2974 destrevlog._lazydelta = False
2971
2975
2972 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2976 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2973
2977
2974 self._clone(
2978 self._clone(
2975 tr,
2979 tr,
2976 destrevlog,
2980 destrevlog,
2977 addrevisioncb,
2981 addrevisioncb,
2978 deltareuse,
2982 deltareuse,
2979 forcedeltabothparents,
2983 forcedeltabothparents,
2980 sidedata_helpers,
2984 sidedata_helpers,
2981 )
2985 )
2982
2986
2983 finally:
2987 finally:
2984 destrevlog._lazydelta = oldlazydelta
2988 destrevlog._lazydelta = oldlazydelta
2985 destrevlog._lazydeltabase = oldlazydeltabase
2989 destrevlog._lazydeltabase = oldlazydeltabase
2986 destrevlog._deltabothparents = oldamd
2990 destrevlog._deltabothparents = oldamd
2987
2991
def _clone(
    self,
    tr,
    destrevlog,
    addrevisioncb,
    deltareuse,
    forcedeltabothparents,
    sidedata_helpers,
):
    """perform the core duty of `revlog.clone` after parameter processing

    Iterates every revision of this revlog and re-adds it to
    ``destrevlog``, either reusing the stored delta (when the delta
    policy set up by ``clone`` allows it) or recomputing content from
    scratch.  ``addrevisioncb``, when not None, is invoked as
    ``addrevisioncb(self, rev, node)`` after each revision is copied.

    See `revlogutil.sidedata.get_sidedata_helpers` for the documentation
    of `sidedata_helpers`.
    """
    deltacomputer = deltautil.deltacomputer(destrevlog)
    index = self.index
    for rev in self:
        entry = index[rev]

        # Some classes override linkrev to take filtered revs into
        # account. Use raw entry from index.
        flags = entry[0] & 0xFFFF
        linkrev = entry[4]
        p1 = index[entry[5]][7]
        p2 = index[entry[6]][7]
        node = entry[7]

        # (Possibly) reuse the delta from the revlog if allowed and
        # the revlog chunk is a delta.
        cachedelta = None
        rawtext = None
        if deltareuse == self.DELTAREUSEFULLADD:
            # Full add: resolve the text and go through the normal
            # addrevision path so all mechanisms (e.g. large file
            # detection) can kick in.
            text, sidedata = self._revisiondata(rev)

            if sidedata_helpers is not None:
                (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                    self, sidedata_helpers, sidedata, rev
                )
                # apply flags to add, then clear flags to remove
                flags = flags | new_flags[0] & ~new_flags[1]

            destrevlog.addrevision(
                text,
                tr,
                linkrev,
                p1,
                p2,
                cachedelta=cachedelta,
                node=node,
                flags=flags,
                deltacomputer=deltacomputer,
                sidedata=sidedata,
            )
        else:
            if destrevlog._lazydelta:
                dp = self.deltaparent(rev)
                if dp != nullrev:
                    cachedelta = (dp, bytes(self._chunk(rev)))

            sidedata = None
            if not cachedelta:
                # no reusable delta: fall back to the full raw text
                rawtext, sidedata = self._revisiondata(rev)
            if sidedata is None:
                sidedata = self.sidedata(rev)

            if sidedata_helpers is not None:
                (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                    self, sidedata_helpers, sidedata, rev
                )
                # apply flags to add, then clear flags to remove
                flags = flags | new_flags[0] & ~new_flags[1]

            with destrevlog._writing(tr):
                destrevlog._addrevision(
                    node,
                    rawtext,
                    tr,
                    linkrev,
                    p1,
                    p2,
                    flags,
                    cachedelta,
                    deltacomputer=deltacomputer,
                    sidedata=sidedata,
                )

        if addrevisioncb:
            addrevisioncb(self, rev, node)
3070
3074
def censorrevision(self, tr, censornode, tombstone=b''):
    """replace the content of `censornode` with a tombstone

    Raises ``error.RevlogError`` for version-0 revlogs and
    ``error.Abort`` when the tombstone does not fit or the revlog's
    delta structure prevents censoring.  The rewrite happens by cloning
    into a temporary revlog and renaming it over the original on
    transaction close.
    """
    if self._format_version == REVLOGV0:
        raise error.RevlogError(
            _(b'cannot censor with version %d revlogs')
            % self._format_version
        )

    censorrev = self.rev(censornode)
    tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

    # The tombstone must fit in the space the censored data occupied,
    # since index entries record the raw size.
    if len(tombstone) > self.rawsize(censorrev):
        raise error.Abort(
            _(b'censor tombstone must be no longer than censored data')
        )

    # Rewriting the revlog in place is hard. Our strategy for censoring is
    # to create a new revlog, copy all revisions to it, then replace the
    # revlogs on transaction close.
    #
    # This is a bit dangerous. We could easily have a mismatch of state.
    newrl = revlog(
        self.opener,
        target=self.target,
        radix=self.radix,
        postfix=b'tmpcensored',
        censorable=True,
    )
    newrl._format_version = self._format_version
    newrl._format_flags = self._format_flags
    newrl._generaldelta = self._generaldelta
    newrl._parse_index = self._parse_index

    for rev in self.revs():
        node = self.node(rev)
        p1, p2 = self.parents(node)

        if rev == censorrev:
            newrl.addrawrevision(
                tombstone,
                tr,
                self.linkrev(censorrev),
                p1,
                p2,
                censornode,
                REVIDX_ISCENSORED,
            )

            # A censored revision stored as a delta would leak the
            # censored content through its delta chain.
            if newrl.deltaparent(rev) != nullrev:
                raise error.Abort(
                    _(
                        b'censored revision stored as delta; '
                        b'cannot censor'
                    ),
                    hint=_(
                        b'censoring of revlogs is not '
                        b'fully implemented; please report '
                        b'this bug'
                    ),
                )
            continue

        if self.iscensored(rev):
            if self.deltaparent(rev) != nullrev:
                raise error.Abort(
                    _(
                        b'cannot censor due to censored '
                        b'revision having delta stored'
                    )
                )
            # keep the stored tombstone chunk as-is
            rawtext = self._chunk(rev)
        else:
            rawtext = self.rawdata(rev)

        newrl.addrawrevision(
            rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
        )

    tr.addbackup(self._indexfile, location=b'store')
    if not self._inline:
        tr.addbackup(self._datafile, location=b'store')

    self.opener.rename(newrl._indexfile, self._indexfile)
    if not self._inline:
        self.opener.rename(newrl._datafile, self._datafile)

    # drop any state derived from the replaced files and reload
    self.clearcaches()
    self._loadindex()
3158
3162
def verifyintegrity(self, state):
    """Verifies the integrity of the revlog.

    Yields ``revlogproblem`` instances describing problems that are
    found.
    """
    dd, di = self.checksize()
    if dd:
        yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
    if di:
        yield revlogproblem(error=_(b'index contains %d extra bytes') % di)

    version = self._format_version

    # The verifier tells us what version revlog we should be.
    if version != state[b'expectedversion']:
        yield revlogproblem(
            warning=_(b"warning: '%s' uses revlog format %d; expected %d")
            % (self.display_id, version, state[b'expectedversion'])
        )

    state[b'skipread'] = set()
    state[b'safe_renamed'] = set()

    for rev in self:
        node = self.node(rev)

        # Verify contents. 4 cases to care about:
        #
        #   common: the most common case
        #   rename: with a rename
        #   meta: file content starts with b'\1\n', the metadata
        #         header defined in filelog.py, but without a rename
        #   ext: content stored externally
        #
        # More formally, their differences are shown below:
        #
        #                       | common | rename | meta  | ext
        #  -------------------------------------------------------
        #  flags()              | 0      | 0      | 0     | not 0
        #  renamed()            | False  | True   | False | ?
        #  rawtext[0:2]=='\1\n' | False  | True   | True  | ?
        #
        # "rawtext" means the raw text stored in revlog data, which
        # could be retrieved by "rawdata(rev)". "text"
        # mentioned below is "revision(rev)".
        #
        # There are 3 different lengths stored physically:
        #  1. L1: rawsize, stored in revlog index
        #  2. L2: len(rawtext), stored in revlog data
        #  3. L3: len(text), stored in revlog data if flags==0, or
        #     possibly somewhere else if flags!=0
        #
        # L1 should be equal to L2. L3 could be different from them.
        # "text" may or may not affect commit hash depending on flag
        # processors (see flagutil.addflagprocessor).
        #
        #              | common  | rename | meta  | ext
        # -------------------------------------------------
        # rawsize()    | L1      | L1     | L1    | L1
        # size()       | L1      | L2-LM  | L1(*) | L1 (?)
        # len(rawtext) | L2      | L2     | L2    | L2
        # len(text)    | L2      | L2     | L2    | L3
        # len(read())  | L2      | L2-LM  | L2-LM | L3 (?)
        #
        # LM:  length of metadata, depending on rawtext
        # (*): not ideal, see comment in filelog.size
        # (?): could be "- len(meta)" if the resolved content has
        #      rename metadata
        #
        # Checks needed to be done:
        #  1. length check: L1 == L2, in all cases.
        #  2. hash check: depending on flag processor, we may need to
        #     use either "text" (external), or "rawtext" (in revlog).

        try:
            skipflags = state.get(b'skipflags', 0)
            if skipflags:
                # only skip flags actually present on this revision
                skipflags &= self.flags(rev)

            _verify_revision(self, skipflags, state, node)

            l1 = self.rawsize(rev)
            l2 = len(self.rawdata(node))

            if l1 != l2:
                yield revlogproblem(
                    error=_(b'unpacked size is %d, %d expected') % (l2, l1),
                    node=node,
                )

        except error.CensoredNodeError:
            if state[b'erroroncensored']:
                yield revlogproblem(
                    error=_(b'censored file data'), node=node
                )
            state[b'skipread'].add(node)
        except Exception as e:
            yield revlogproblem(
                error=_(b'unpacking %s: %s')
                % (short(node), stringutil.forcebytestr(e)),
                node=node,
            )
            state[b'skipread'].add(node)
3263
3267
def storageinfo(
    self,
    exclusivefiles=False,
    sharedfiles=False,
    revisionscount=False,
    trackedsize=False,
    storedsize=False,
):
    """Return a dict describing storage used by this revlog.

    Each boolean keyword argument opts in to one key of the result:

    ``exclusivefiles``: list of ``(opener, path)`` pairs owned solely
        by this revlog (index, plus data file when not inline)
    ``sharedfiles``: always an empty list for revlogs
    ``revisionscount``: number of revisions
    ``trackedsize``: sum of the raw sizes of all revisions
    ``storedsize``: on-disk size of the revlog's files
    """
    d = {}

    if exclusivefiles:
        d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
        if not self._inline:
            # non-inline revlogs keep data in a separate file
            d[b'exclusivefiles'].append((self.opener, self._datafile))

    if sharedfiles:
        d[b'sharedfiles'] = []

    if revisionscount:
        d[b'revisionscount'] = len(self)

    if trackedsize:
        d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))

    if storedsize:
        d[b'storedsize'] = sum(
            self.opener.stat(path).st_size for path in self.files()
        )

    return d
3294
3298
def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
    """Regenerate sidedata for revisions ``startrev``..``endrev`` inclusive.

    New sidedata blocks are appended to the data file and the matching
    index entries are rewritten in place to point at them.  No-op when
    the revlog has no sidedata support or ``helpers`` generates and
    removes nothing.  Raises ``error.Abort`` when a targeted revision
    already carries sidedata (rewriting it is not supported yet).

    See `revlogutil.sidedata.get_sidedata_helpers` for the structure of
    ``helpers``.
    """
    if not self.hassidedata:
        return
    # revlog formats with sidedata support does not support inline
    assert not self._inline
    if not helpers[1] and not helpers[2]:
        # Nothing to generate or remove
        return

    new_entries = []
    # append the new sidedata
    with self._writing(transaction):
        ifh, dfh = self._writinghandles
        if self._docket is not None:
            # docket-based revlogs track the valid data end explicitly
            dfh.seek(self._docket.data_end, os.SEEK_SET)
        else:
            dfh.seek(0, os.SEEK_END)

        current_offset = dfh.tell()
        for rev in range(startrev, endrev + 1):
            entry = self.index[rev]
            new_sidedata, flags = sidedatautil.run_sidedata_helpers(
                store=self,
                sidedata_helpers=helpers,
                sidedata={},
                rev=rev,
            )

            serialized_sidedata = sidedatautil.serialize_sidedata(
                new_sidedata
            )
            if entry[8] != 0 or entry[9] != 0:
                # rewriting entries that already have sidedata is not
                # supported yet, because it introduces garbage data in the
                # revlog.
                msg = b"rewriting existing sidedata is not supported yet"
                raise error.Abort(msg)

            # Apply (potential) flags to add and to remove after running
            # the sidedata helpers
            new_offset_flags = entry[0] | flags[0] & ~flags[1]
            entry_update = (
                current_offset,
                len(serialized_sidedata),
                new_offset_flags,
            )

            # the sidedata computation might have move the file cursors around
            dfh.seek(current_offset, os.SEEK_SET)
            dfh.write(serialized_sidedata)
            new_entries.append(entry_update)
            current_offset += len(serialized_sidedata)
        if self._docket is not None:
            self._docket.data_end = dfh.tell()

        # rewrite the new index entries
        ifh.seek(startrev * self.index.entry_size)
        for i, e in enumerate(new_entries):
            rev = startrev + i
            self.index.replace_sidedata_info(rev, *e)
            packed = self.index.entry_binary(rev)
            if rev == 0 and self._docket is None:
                # the first entry of a docket-less index embeds the
                # revlog format header
                header = self._format_flags | self._format_version
                header = self.index.pack_header(header)
                packed = header + packed
            ifh.write(packed)
General Comments 0
You need to be logged in to leave comments. Login now