##// END OF EJS Templates
changelogv2: use a dedicated on disk format for changelogv2...
marmoute -
r48044:25ce16bf default
parent child Browse files
Show More
@@ -1,2712 +1,2711 b''
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11 import re
11 import re
12
12
13 from . import (
13 from . import (
14 encoding,
14 encoding,
15 error,
15 error,
16 )
16 )
17
17
18
18
def loadconfigtable(ui, extname, configtable):
    """update config item known to the ui with the extension ones

    :ui: the ui instance whose ``_knownconfig`` registry is extended,
    :extname: name of the extension providing ``configtable`` (used in
        the develwarn message),
    :configtable: mapping of section -> {name: configitem} to merge in.
    """
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        # warn when an extension shadows an already-registered item
        for key in sorted(knownkeys & newkeys):
            msg = b"extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config=b'warn-config')

        knownitems.update(items)
31
31
32
32
class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, match name using regular expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        # defensive copy so callers' sequences are not shared
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        self._re = None
        # generic items match config names by regular expression
        if generic:
            self._re = re.compile(self.name)
63
63
64
64
class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        # set of registered items with ``generic=True`` (regex-matched)
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        baseitem = super(itemregister, self).get(key)
        # exact, non-generic registration wins outright
        if baseitem is not None and not baseitem.generic:
            return baseitem

        # search for a matching generic item
        generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
        for item in generics:
            # we use 'match' instead of 'search' to make the matching simpler
            # for people unfamiliar with regular expression. Having the match
            # rooted to the start of the string will produce less surprising
            # result for user writing simple regex for sub-attribute.
            #
            # For example using "color\..*" match produces an unsurprising
            # result, while using search could suddenly match apparently
            # unrelated configuration that happens to contains "color."
            # anywhere. This is a tradeoff where we favor requiring ".*" on
            # some match to avoid the need to prefix most pattern with "^".
            # The "^" seems more error prone.
            if item._re.match(key):
                return item

        return None
104
104
105
105
# registry of all config items declared by Mercurial core, keyed by section
coreitems = {}
107
107
108
108
def _register(configtable, *args, **kwargs):
    """Create a configitem from *args/**kwargs and add it to ``configtable``.

    Raises ProgrammingError when the same section/name pair is registered
    twice, so duplicated declarations are caught early.
    """
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = b"duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item
116
116
117
117
# special value for case where the default is derived from other values
dynamicdefault = object()
120
120
121 # Registering actual config items
121 # Registering actual config items
122
122
123
123
def getitemregister(configtable):
    """Return a registration helper bound to ``configtable``.

    The returned callable forwards to ``_register`` with ``configtable``
    pre-applied; ``dynamicdefault`` is attached as an attribute so callers
    can use it as a pseudo enum value.
    """
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f
129
129
130
130
# registration helper used below for all of Mercurial core's config items
coreconfigitem = getitemregister(coreitems)
132
132
133
133
def _registerdiffopts(section, configprefix=b''):
    """Register the standard set of diff options under ``section``.

    :section: config section to register into,
    :configprefix: optional byte-string prefix for every option name
        (e.g. b'commit.interactive.').
    """
    coreconfigitem(
        section,
        configprefix + b'nodates',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'showfunc',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'unified',
        default=None,
    )
    coreconfigitem(
        section,
        configprefix + b'git',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorews',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorewsamount',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignoreblanklines',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorewseol',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'nobinary',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'noprefix',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'word-diff',
        default=False,
    )
190
190
191
191
# Registration of Mercurial core's config items, one call per option.
coreconfigitem(b'alias', b'.*', default=dynamicdefault, generic=True)
coreconfigitem(b'auth', b'cookiefile', default=None)
_registerdiffopts(section=b'annotate')
# bookmarks.pushing: internal hack for discovery
coreconfigitem(b'bookmarks', b'pushing', default=list)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem(b'bundle', b'mainreporoot', default=b'')
coreconfigitem(b'censor', b'policy', default=b'abort', experimental=True)
coreconfigitem(b'chgserver', b'idletimeout', default=3600)
coreconfigitem(b'chgserver', b'skiphash', default=False)
coreconfigitem(b'cmdserver', b'log', default=None)
coreconfigitem(b'cmdserver', b'max-log-files', default=7)
coreconfigitem(b'cmdserver', b'max-log-size', default=b'1 MB')
coreconfigitem(b'cmdserver', b'max-repo-cache', default=0, experimental=True)
coreconfigitem(b'cmdserver', b'message-encodings', default=list)
coreconfigitem(
    b'cmdserver',
    b'track-log',
    default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
)
coreconfigitem(b'cmdserver', b'shutdown-on-interrupt', default=True)
coreconfigitem(b'color', b'.*', default=None, generic=True)
coreconfigitem(b'color', b'mode', default=b'auto')
coreconfigitem(b'color', b'pagermode', default=dynamicdefault)
coreconfigitem(
    b'command-templates',
    b'graphnode',
    default=None,
    alias=[(b'ui', b'graphnodetemplate')],
)
coreconfigitem(
    b'command-templates',
    b'log',
    default=None,
    alias=[(b'ui', b'logtemplate')],
)
coreconfigitem(
    b'command-templates',
    b'mergemarker',
    default=(
        b'{node|short} '
        b'{ifeq(tags, "tip", "", '
        b'ifeq(tags, "", "", "{tags} "))}'
        b'{if(bookmarks, "{bookmarks} ")}'
        b'{ifeq(branch, "default", "", "{branch} ")}'
        b'- {author|user}: {desc|firstline}'
    ),
    alias=[(b'ui', b'mergemarkertemplate')],
)
coreconfigitem(
    b'command-templates',
    b'pre-merge-tool-output',
    default=None,
    alias=[(b'ui', b'pre-merge-tool-output-template')],
)
coreconfigitem(b'command-templates', b'oneline-summary', default=None)
coreconfigitem(
    b'command-templates',
    b'oneline-summary.*',
    default=dynamicdefault,
    generic=True,
)
_registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
coreconfigitem(b'commands', b'commit.post-status', default=False)
coreconfigitem(b'commands', b'grep.all-files', default=False, experimental=True)
coreconfigitem(b'commands', b'merge.require-rev', default=False)
coreconfigitem(b'commands', b'push.require-revs', default=False)
coreconfigitem(b'commands', b'resolve.confirm', default=False)
coreconfigitem(b'commands', b'resolve.explicit-re-merge', default=False)
coreconfigitem(b'commands', b'resolve.mark-check', default=b'none')
_registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
coreconfigitem(b'commands', b'show.aliasprefix', default=list)
coreconfigitem(b'commands', b'status.relative', default=False)
coreconfigitem(b'commands', b'status.skipstates', default=[], experimental=True)
coreconfigitem(b'commands', b'status.terse', default=b'')
coreconfigitem(b'commands', b'status.verbose', default=False)
coreconfigitem(b'commands', b'update.check', default=None)
coreconfigitem(b'commands', b'update.requiredest', default=False)
coreconfigitem(b'committemplate', b'.*', default=None, generic=True)
coreconfigitem(b'convert', b'bzr.saverev', default=True)
coreconfigitem(b'convert', b'cvsps.cache', default=True)
coreconfigitem(b'convert', b'cvsps.fuzz', default=60)
coreconfigitem(b'convert', b'cvsps.logencoding', default=None)
coreconfigitem(b'convert', b'cvsps.mergefrom', default=None)
coreconfigitem(b'convert', b'cvsps.mergeto', default=None)
coreconfigitem(
    b'convert',
    b'git.committeractions',
    default=lambda: [b'messagedifferent'],
)
coreconfigitem(b'convert', b'git.extrakeys', default=list)
coreconfigitem(b'convert', b'git.findcopiesharder', default=False)
coreconfigitem(b'convert', b'git.remoteprefix', default=b'remote')
coreconfigitem(b'convert', b'git.renamelimit', default=400)
coreconfigitem(b'convert', b'git.saverev', default=True)
coreconfigitem(b'convert', b'git.similarity', default=50)
coreconfigitem(b'convert', b'git.skipsubmodules', default=False)
coreconfigitem(b'convert', b'hg.clonebranches', default=False)
coreconfigitem(b'convert', b'hg.ignoreerrors', default=False)
coreconfigitem(b'convert', b'hg.preserve-hash', default=False)
coreconfigitem(b'convert', b'hg.revs', default=None)
coreconfigitem(b'convert', b'hg.saverev', default=False)
coreconfigitem(b'convert', b'hg.sourcename', default=None)
coreconfigitem(b'convert', b'hg.startrev', default=None)
coreconfigitem(b'convert', b'hg.tagsbranch', default=b'default')
coreconfigitem(b'convert', b'hg.usebranchnames', default=True)
coreconfigitem(
    b'convert',
    b'ignoreancestorcheck',
    default=False,
    experimental=True,
)
coreconfigitem(b'convert', b'localtimezone', default=False)
coreconfigitem(b'convert', b'p4.encoding', default=dynamicdefault)
coreconfigitem(b'convert', b'p4.startrev', default=0)
coreconfigitem(b'convert', b'skiptags', default=False)
coreconfigitem(b'convert', b'svn.debugsvnlog', default=True)
coreconfigitem(b'convert', b'svn.trunk', default=None)
coreconfigitem(b'convert', b'svn.tags', default=None)
coreconfigitem(b'convert', b'svn.branches', default=None)
coreconfigitem(b'convert', b'svn.startrev', default=0)
coreconfigitem(b'convert', b'svn.dangerous-set-commit-dates', default=False)
coreconfigitem(b'debug', b'dirstate.delaywrite', default=0)
coreconfigitem(b'debug', b'revlog.verifyposition.changelog', default=b'')
coreconfigitem(b'defaults', b'.*', default=None, generic=True)
coreconfigitem(b'devel', b'all-warnings', default=False)
coreconfigitem(b'devel', b'bundle2.debug', default=False)
coreconfigitem(b'devel', b'bundle.delta', default=b'')
coreconfigitem(b'devel', b'cache-vfs', default=None)
coreconfigitem(b'devel', b'check-locks', default=False)
coreconfigitem(b'devel', b'check-relroot', default=False)
# Track copy information for all file, not just "added" one (very slow)
coreconfigitem(b'devel', b'copy-tracing.trace-all-files', default=False)
coreconfigitem(b'devel', b'default-date', default=None)
coreconfigitem(b'devel', b'deprec-warn', default=False)
coreconfigitem(b'devel', b'disableloaddefaultcerts', default=False)
coreconfigitem(b'devel', b'warn-empty-changegroup', default=False)
coreconfigitem(b'devel', b'legacy.exchange', default=list)
# When True, revlogs use a special reference version of the nodemap, that is not
# performant but is "known" to behave properly.
coreconfigitem(b'devel', b'persistent-nodemap', default=False)
coreconfigitem(b'devel', b'servercafile', default=b'')
coreconfigitem(b'devel', b'serverexactprotocol', default=b'')
coreconfigitem(b'devel', b'serverrequirecert', default=False)
coreconfigitem(b'devel', b'strip-obsmarkers', default=True)
coreconfigitem(b'devel', b'warn-config', default=None)
coreconfigitem(b'devel', b'warn-config-default', default=None)
coreconfigitem(b'devel', b'user.obsmarker', default=None)
coreconfigitem(b'devel', b'warn-config-unknown', default=None)
coreconfigitem(b'devel', b'debug.copies', default=False)
coreconfigitem(b'devel', b'copy-tracing.multi-thread', default=True)
coreconfigitem(b'devel', b'debug.extensions', default=False)
coreconfigitem(b'devel', b'debug.repo-filters', default=False)
coreconfigitem(b'devel', b'debug.peer-request', default=False)
# If discovery.exchange-heads is False, the discovery will not start with
# remote head fetching and local head querying.
coreconfigitem(b'devel', b'discovery.exchange-heads', default=True)
# If discovery.grow-sample is False, the sample size used in set discovery will
# not be increased through the process
coreconfigitem(b'devel', b'discovery.grow-sample', default=True)
# When discovery.grow-sample.dynamic is True, the default, the sample size is
# adapted to the shape of the undecided set (it is set to the max of:
# <target-size>, len(roots(undecided)), len(heads(undecided)
coreconfigitem(b'devel', b'discovery.grow-sample.dynamic', default=True)
# discovery.grow-sample.rate control the rate at which the sample grow
coreconfigitem(b'devel', b'discovery.grow-sample.rate', default=1.05)
# If discovery.randomize is False, random sampling during discovery are
# deterministic. It is meant for integration tests.
coreconfigitem(b'devel', b'discovery.randomize', default=True)
# Control the initial size of the discovery sample
coreconfigitem(b'devel', b'discovery.sample-size', default=200)
# Control the initial size of the discovery for initial change
coreconfigitem(b'devel', b'discovery.sample-size.initial', default=100)
_registerdiffopts(section=b'diff')
coreconfigitem(b'diff', b'merge', default=False, experimental=True)
coreconfigitem(b'email', b'bcc', default=None)
coreconfigitem(b'email', b'cc', default=None)
coreconfigitem(b'email', b'charsets', default=list)
coreconfigitem(b'email', b'from', default=None)
coreconfigitem(b'email', b'method', default=b'smtp')
coreconfigitem(b'email', b'reply-to', default=None)
coreconfigitem(b'email', b'to', default=None)
coreconfigitem(b'experimental', b'archivemetatemplate', default=dynamicdefault)
820 coreconfigitem(
820 coreconfigitem(
821 b'experimental',
821 b'experimental',
822 b'auto-publish',
822 b'auto-publish',
823 default=b'publish',
823 default=b'publish',
824 )
824 )
825 coreconfigitem(
825 coreconfigitem(
826 b'experimental',
826 b'experimental',
827 b'bundle-phases',
827 b'bundle-phases',
828 default=False,
828 default=False,
829 )
829 )
830 coreconfigitem(
830 coreconfigitem(
831 b'experimental',
831 b'experimental',
832 b'bundle2-advertise',
832 b'bundle2-advertise',
833 default=True,
833 default=True,
834 )
834 )
835 coreconfigitem(
835 coreconfigitem(
836 b'experimental',
836 b'experimental',
837 b'bundle2-output-capture',
837 b'bundle2-output-capture',
838 default=False,
838 default=False,
839 )
839 )
840 coreconfigitem(
840 coreconfigitem(
841 b'experimental',
841 b'experimental',
842 b'bundle2.pushback',
842 b'bundle2.pushback',
843 default=False,
843 default=False,
844 )
844 )
845 coreconfigitem(
845 coreconfigitem(
846 b'experimental',
846 b'experimental',
847 b'bundle2lazylocking',
847 b'bundle2lazylocking',
848 default=False,
848 default=False,
849 )
849 )
850 coreconfigitem(
850 coreconfigitem(
851 b'experimental',
851 b'experimental',
852 b'bundlecomplevel',
852 b'bundlecomplevel',
853 default=None,
853 default=None,
854 )
854 )
855 coreconfigitem(
855 coreconfigitem(
856 b'experimental',
856 b'experimental',
857 b'bundlecomplevel.bzip2',
857 b'bundlecomplevel.bzip2',
858 default=None,
858 default=None,
859 )
859 )
860 coreconfigitem(
860 coreconfigitem(
861 b'experimental',
861 b'experimental',
862 b'bundlecomplevel.gzip',
862 b'bundlecomplevel.gzip',
863 default=None,
863 default=None,
864 )
864 )
865 coreconfigitem(
865 coreconfigitem(
866 b'experimental',
866 b'experimental',
867 b'bundlecomplevel.none',
867 b'bundlecomplevel.none',
868 default=None,
868 default=None,
869 )
869 )
870 coreconfigitem(
870 coreconfigitem(
871 b'experimental',
871 b'experimental',
872 b'bundlecomplevel.zstd',
872 b'bundlecomplevel.zstd',
873 default=None,
873 default=None,
874 )
874 )
875 coreconfigitem(
875 coreconfigitem(
876 b'experimental',
876 b'experimental',
877 b'bundlecompthreads',
877 b'bundlecompthreads',
878 default=None,
878 default=None,
879 )
879 )
880 coreconfigitem(
880 coreconfigitem(
881 b'experimental',
881 b'experimental',
882 b'bundlecompthreads.bzip2',
882 b'bundlecompthreads.bzip2',
883 default=None,
883 default=None,
884 )
884 )
885 coreconfigitem(
885 coreconfigitem(
886 b'experimental',
886 b'experimental',
887 b'bundlecompthreads.gzip',
887 b'bundlecompthreads.gzip',
888 default=None,
888 default=None,
889 )
889 )
890 coreconfigitem(
890 coreconfigitem(
891 b'experimental',
891 b'experimental',
892 b'bundlecompthreads.none',
892 b'bundlecompthreads.none',
893 default=None,
893 default=None,
894 )
894 )
895 coreconfigitem(
895 coreconfigitem(
896 b'experimental',
896 b'experimental',
897 b'bundlecompthreads.zstd',
897 b'bundlecompthreads.zstd',
898 default=None,
898 default=None,
899 )
899 )
900 coreconfigitem(
900 coreconfigitem(
901 b'experimental',
901 b'experimental',
902 b'changegroup3',
902 b'changegroup3',
903 default=False,
903 default=False,
904 )
904 )
905 coreconfigitem(
905 coreconfigitem(
906 b'experimental',
906 b'experimental',
907 b'changegroup4',
907 b'changegroup4',
908 default=False,
908 default=False,
909 )
909 )
910 coreconfigitem(
910 coreconfigitem(
911 b'experimental',
911 b'experimental',
912 b'cleanup-as-archived',
912 b'cleanup-as-archived',
913 default=False,
913 default=False,
914 )
914 )
915 coreconfigitem(
915 coreconfigitem(
916 b'experimental',
916 b'experimental',
917 b'clientcompressionengines',
917 b'clientcompressionengines',
918 default=list,
918 default=list,
919 )
919 )
920 coreconfigitem(
920 coreconfigitem(
921 b'experimental',
921 b'experimental',
922 b'copytrace',
922 b'copytrace',
923 default=b'on',
923 default=b'on',
924 )
924 )
925 coreconfigitem(
925 coreconfigitem(
926 b'experimental',
926 b'experimental',
927 b'copytrace.movecandidateslimit',
927 b'copytrace.movecandidateslimit',
928 default=100,
928 default=100,
929 )
929 )
930 coreconfigitem(
930 coreconfigitem(
931 b'experimental',
931 b'experimental',
932 b'copytrace.sourcecommitlimit',
932 b'copytrace.sourcecommitlimit',
933 default=100,
933 default=100,
934 )
934 )
935 coreconfigitem(
935 coreconfigitem(
936 b'experimental',
936 b'experimental',
937 b'copies.read-from',
937 b'copies.read-from',
938 default=b"filelog-only",
938 default=b"filelog-only",
939 )
939 )
940 coreconfigitem(
940 coreconfigitem(
941 b'experimental',
941 b'experimental',
942 b'copies.write-to',
942 b'copies.write-to',
943 default=b'filelog-only',
943 default=b'filelog-only',
944 )
944 )
945 coreconfigitem(
945 coreconfigitem(
946 b'experimental',
946 b'experimental',
947 b'crecordtest',
947 b'crecordtest',
948 default=None,
948 default=None,
949 )
949 )
950 coreconfigitem(
950 coreconfigitem(
951 b'experimental',
951 b'experimental',
952 b'directaccess',
952 b'directaccess',
953 default=False,
953 default=False,
954 )
954 )
955 coreconfigitem(
955 coreconfigitem(
956 b'experimental',
956 b'experimental',
957 b'directaccess.revnums',
957 b'directaccess.revnums',
958 default=False,
958 default=False,
959 )
959 )
960 coreconfigitem(
960 coreconfigitem(
961 b'experimental',
961 b'experimental',
962 b'dirstate-tree.in-memory',
962 b'dirstate-tree.in-memory',
963 default=False,
963 default=False,
964 )
964 )
965 coreconfigitem(
965 coreconfigitem(
966 b'experimental',
966 b'experimental',
967 b'editortmpinhg',
967 b'editortmpinhg',
968 default=False,
968 default=False,
969 )
969 )
970 coreconfigitem(
970 coreconfigitem(
971 b'experimental',
971 b'experimental',
972 b'evolution',
972 b'evolution',
973 default=list,
973 default=list,
974 )
974 )
975 coreconfigitem(
975 coreconfigitem(
976 b'experimental',
976 b'experimental',
977 b'evolution.allowdivergence',
977 b'evolution.allowdivergence',
978 default=False,
978 default=False,
979 alias=[(b'experimental', b'allowdivergence')],
979 alias=[(b'experimental', b'allowdivergence')],
980 )
980 )
981 coreconfigitem(
981 coreconfigitem(
982 b'experimental',
982 b'experimental',
983 b'evolution.allowunstable',
983 b'evolution.allowunstable',
984 default=None,
984 default=None,
985 )
985 )
986 coreconfigitem(
986 coreconfigitem(
987 b'experimental',
987 b'experimental',
988 b'evolution.createmarkers',
988 b'evolution.createmarkers',
989 default=None,
989 default=None,
990 )
990 )
991 coreconfigitem(
991 coreconfigitem(
992 b'experimental',
992 b'experimental',
993 b'evolution.effect-flags',
993 b'evolution.effect-flags',
994 default=True,
994 default=True,
995 alias=[(b'experimental', b'effect-flags')],
995 alias=[(b'experimental', b'effect-flags')],
996 )
996 )
997 coreconfigitem(
997 coreconfigitem(
998 b'experimental',
998 b'experimental',
999 b'evolution.exchange',
999 b'evolution.exchange',
1000 default=None,
1000 default=None,
1001 )
1001 )
1002 coreconfigitem(
1002 coreconfigitem(
1003 b'experimental',
1003 b'experimental',
1004 b'evolution.bundle-obsmarker',
1004 b'evolution.bundle-obsmarker',
1005 default=False,
1005 default=False,
1006 )
1006 )
1007 coreconfigitem(
1007 coreconfigitem(
1008 b'experimental',
1008 b'experimental',
1009 b'evolution.bundle-obsmarker:mandatory',
1009 b'evolution.bundle-obsmarker:mandatory',
1010 default=True,
1010 default=True,
1011 )
1011 )
1012 coreconfigitem(
1012 coreconfigitem(
1013 b'experimental',
1013 b'experimental',
1014 b'log.topo',
1014 b'log.topo',
1015 default=False,
1015 default=False,
1016 )
1016 )
1017 coreconfigitem(
1017 coreconfigitem(
1018 b'experimental',
1018 b'experimental',
1019 b'evolution.report-instabilities',
1019 b'evolution.report-instabilities',
1020 default=True,
1020 default=True,
1021 )
1021 )
1022 coreconfigitem(
1022 coreconfigitem(
1023 b'experimental',
1023 b'experimental',
1024 b'evolution.track-operation',
1024 b'evolution.track-operation',
1025 default=True,
1025 default=True,
1026 )
1026 )
1027 # repo-level config to exclude a revset visibility
1027 # repo-level config to exclude a revset visibility
1028 #
1028 #
1029 # The target use case is to use `share` to expose different subset of the same
1029 # The target use case is to use `share` to expose different subset of the same
1030 # repository, especially server side. See also `server.view`.
1030 # repository, especially server side. See also `server.view`.
1031 coreconfigitem(
1031 coreconfigitem(
1032 b'experimental',
1032 b'experimental',
1033 b'extra-filter-revs',
1033 b'extra-filter-revs',
1034 default=None,
1034 default=None,
1035 )
1035 )
1036 coreconfigitem(
1036 coreconfigitem(
1037 b'experimental',
1037 b'experimental',
1038 b'maxdeltachainspan',
1038 b'maxdeltachainspan',
1039 default=-1,
1039 default=-1,
1040 )
1040 )
1041 # tracks files which were undeleted (merge might delete them but we explicitly
1041 # tracks files which were undeleted (merge might delete them but we explicitly
1042 # kept/undeleted them) and creates new filenodes for them
1042 # kept/undeleted them) and creates new filenodes for them
1043 coreconfigitem(
1043 coreconfigitem(
1044 b'experimental',
1044 b'experimental',
1045 b'merge-track-salvaged',
1045 b'merge-track-salvaged',
1046 default=False,
1046 default=False,
1047 )
1047 )
1048 coreconfigitem(
1048 coreconfigitem(
1049 b'experimental',
1049 b'experimental',
1050 b'mergetempdirprefix',
1050 b'mergetempdirprefix',
1051 default=None,
1051 default=None,
1052 )
1052 )
1053 coreconfigitem(
1053 coreconfigitem(
1054 b'experimental',
1054 b'experimental',
1055 b'mmapindexthreshold',
1055 b'mmapindexthreshold',
1056 default=None,
1056 default=None,
1057 )
1057 )
1058 coreconfigitem(
1058 coreconfigitem(
1059 b'experimental',
1059 b'experimental',
1060 b'narrow',
1060 b'narrow',
1061 default=False,
1061 default=False,
1062 )
1062 )
1063 coreconfigitem(
1063 coreconfigitem(
1064 b'experimental',
1064 b'experimental',
1065 b'nonnormalparanoidcheck',
1065 b'nonnormalparanoidcheck',
1066 default=False,
1066 default=False,
1067 )
1067 )
1068 coreconfigitem(
1068 coreconfigitem(
1069 b'experimental',
1069 b'experimental',
1070 b'exportableenviron',
1070 b'exportableenviron',
1071 default=list,
1071 default=list,
1072 )
1072 )
1073 coreconfigitem(
1073 coreconfigitem(
1074 b'experimental',
1074 b'experimental',
1075 b'extendedheader.index',
1075 b'extendedheader.index',
1076 default=None,
1076 default=None,
1077 )
1077 )
1078 coreconfigitem(
1078 coreconfigitem(
1079 b'experimental',
1079 b'experimental',
1080 b'extendedheader.similarity',
1080 b'extendedheader.similarity',
1081 default=False,
1081 default=False,
1082 )
1082 )
1083 coreconfigitem(
1083 coreconfigitem(
1084 b'experimental',
1084 b'experimental',
1085 b'graphshorten',
1085 b'graphshorten',
1086 default=False,
1086 default=False,
1087 )
1087 )
1088 coreconfigitem(
1088 coreconfigitem(
1089 b'experimental',
1089 b'experimental',
1090 b'graphstyle.parent',
1090 b'graphstyle.parent',
1091 default=dynamicdefault,
1091 default=dynamicdefault,
1092 )
1092 )
1093 coreconfigitem(
1093 coreconfigitem(
1094 b'experimental',
1094 b'experimental',
1095 b'graphstyle.missing',
1095 b'graphstyle.missing',
1096 default=dynamicdefault,
1096 default=dynamicdefault,
1097 )
1097 )
1098 coreconfigitem(
1098 coreconfigitem(
1099 b'experimental',
1099 b'experimental',
1100 b'graphstyle.grandparent',
1100 b'graphstyle.grandparent',
1101 default=dynamicdefault,
1101 default=dynamicdefault,
1102 )
1102 )
1103 coreconfigitem(
1103 coreconfigitem(
1104 b'experimental',
1104 b'experimental',
1105 b'hook-track-tags',
1105 b'hook-track-tags',
1106 default=False,
1106 default=False,
1107 )
1107 )
1108 coreconfigitem(
1108 coreconfigitem(
1109 b'experimental',
1109 b'experimental',
1110 b'httppeer.advertise-v2',
1110 b'httppeer.advertise-v2',
1111 default=False,
1111 default=False,
1112 )
1112 )
1113 coreconfigitem(
1113 coreconfigitem(
1114 b'experimental',
1114 b'experimental',
1115 b'httppeer.v2-encoder-order',
1115 b'httppeer.v2-encoder-order',
1116 default=None,
1116 default=None,
1117 )
1117 )
1118 coreconfigitem(
1118 coreconfigitem(
1119 b'experimental',
1119 b'experimental',
1120 b'httppostargs',
1120 b'httppostargs',
1121 default=False,
1121 default=False,
1122 )
1122 )
1123 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1123 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1124 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1124 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1125
1125
1126 coreconfigitem(
1126 coreconfigitem(
1127 b'experimental',
1127 b'experimental',
1128 b'obsmarkers-exchange-debug',
1128 b'obsmarkers-exchange-debug',
1129 default=False,
1129 default=False,
1130 )
1130 )
1131 coreconfigitem(
1131 coreconfigitem(
1132 b'experimental',
1132 b'experimental',
1133 b'remotenames',
1133 b'remotenames',
1134 default=False,
1134 default=False,
1135 )
1135 )
1136 coreconfigitem(
1136 coreconfigitem(
1137 b'experimental',
1137 b'experimental',
1138 b'removeemptydirs',
1138 b'removeemptydirs',
1139 default=True,
1139 default=True,
1140 )
1140 )
1141 coreconfigitem(
1141 coreconfigitem(
1142 b'experimental',
1142 b'experimental',
1143 b'revert.interactive.select-to-keep',
1143 b'revert.interactive.select-to-keep',
1144 default=False,
1144 default=False,
1145 )
1145 )
1146 coreconfigitem(
1146 coreconfigitem(
1147 b'experimental',
1147 b'experimental',
1148 b'revisions.prefixhexnode',
1148 b'revisions.prefixhexnode',
1149 default=False,
1149 default=False,
1150 )
1150 )
1151 # "out of experimental" todo list.
1151 # "out of experimental" todo list.
1152 #
1152 #
1153 # * include management of a persistent nodemap in the main docket
1153 # * include management of a persistent nodemap in the main docket
1154 # * enforce a "no-truncate" policy for mmap safety
1154 # * enforce a "no-truncate" policy for mmap safety
1155 # - for censoring operation
1155 # - for censoring operation
1156 # - for stripping operation
1156 # - for stripping operation
1157 # - for rollback operation
1157 # - for rollback operation
1158 # * proper streaming (race free) of the docket file
1158 # * proper streaming (race free) of the docket file
1159 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1159 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1160 # * Exchange-wise, we will also need to do something more efficient than
1160 # * Exchange-wise, we will also need to do something more efficient than
1161 # keeping references to the affected revlogs, especially memory-wise when
1161 # keeping references to the affected revlogs, especially memory-wise when
1162 # rewriting sidedata.
1162 # rewriting sidedata.
1163 # * sidedata compression
1163 # * sidedata compression
1164 # * introduce a proper solution to reduce the number of filelog related files.
1164 # * introduce a proper solution to reduce the number of filelog related files.
1165 # * Improvement to consider
1165 # * Improvement to consider
1166 # - avoid compression header in chunk using the default compression?
1166 # - avoid compression header in chunk using the default compression?
1167 # - forbid "inline" compression mode entirely?
1167 # - forbid "inline" compression mode entirely?
1168 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1168 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1169 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1169 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1170 # - keep track of chain base or size (probably not that useful anymore)
1170 # - keep track of chain base or size (probably not that useful anymore)
1171 # - store data and sidedata in different files
1171 # - store data and sidedata in different files
1172 coreconfigitem(
1172 coreconfigitem(
1173 b'experimental',
1173 b'experimental',
1174 b'revlogv2',
1174 b'revlogv2',
1175 default=None,
1175 default=None,
1176 )
1176 )
1177 coreconfigitem(
1177 coreconfigitem(
1178 b'experimental',
1178 b'experimental',
1179 b'revisions.disambiguatewithin',
1179 b'revisions.disambiguatewithin',
1180 default=None,
1180 default=None,
1181 )
1181 )
1182 coreconfigitem(
1182 coreconfigitem(
1183 b'experimental',
1183 b'experimental',
1184 b'rust.index',
1184 b'rust.index',
1185 default=False,
1185 default=False,
1186 )
1186 )
1187 coreconfigitem(
1187 coreconfigitem(
1188 b'experimental',
1188 b'experimental',
1189 b'server.filesdata.recommended-batch-size',
1189 b'server.filesdata.recommended-batch-size',
1190 default=50000,
1190 default=50000,
1191 )
1191 )
1192 coreconfigitem(
1192 coreconfigitem(
1193 b'experimental',
1193 b'experimental',
1194 b'server.manifestdata.recommended-batch-size',
1194 b'server.manifestdata.recommended-batch-size',
1195 default=100000,
1195 default=100000,
1196 )
1196 )
1197 coreconfigitem(
1197 coreconfigitem(
1198 b'experimental',
1198 b'experimental',
1199 b'server.stream-narrow-clones',
1199 b'server.stream-narrow-clones',
1200 default=False,
1200 default=False,
1201 )
1201 )
1202 coreconfigitem(
1202 coreconfigitem(
1203 b'experimental',
1203 b'experimental',
1204 b'single-head-per-branch',
1204 b'single-head-per-branch',
1205 default=False,
1205 default=False,
1206 )
1206 )
1207 coreconfigitem(
1207 coreconfigitem(
1208 b'experimental',
1208 b'experimental',
1209 b'single-head-per-branch:account-closed-heads',
1209 b'single-head-per-branch:account-closed-heads',
1210 default=False,
1210 default=False,
1211 )
1211 )
1212 coreconfigitem(
1212 coreconfigitem(
1213 b'experimental',
1213 b'experimental',
1214 b'single-head-per-branch:public-changes-only',
1214 b'single-head-per-branch:public-changes-only',
1215 default=False,
1215 default=False,
1216 )
1216 )
1217 coreconfigitem(
1217 coreconfigitem(
1218 b'experimental',
1218 b'experimental',
1219 b'sshserver.support-v2',
1219 b'sshserver.support-v2',
1220 default=False,
1220 default=False,
1221 )
1221 )
1222 coreconfigitem(
1222 coreconfigitem(
1223 b'experimental',
1223 b'experimental',
1224 b'sparse-read',
1224 b'sparse-read',
1225 default=False,
1225 default=False,
1226 )
1226 )
1227 coreconfigitem(
1227 coreconfigitem(
1228 b'experimental',
1228 b'experimental',
1229 b'sparse-read.density-threshold',
1229 b'sparse-read.density-threshold',
1230 default=0.50,
1230 default=0.50,
1231 )
1231 )
1232 coreconfigitem(
1232 coreconfigitem(
1233 b'experimental',
1233 b'experimental',
1234 b'sparse-read.min-gap-size',
1234 b'sparse-read.min-gap-size',
1235 default=b'65K',
1235 default=b'65K',
1236 )
1236 )
1237 coreconfigitem(
1237 coreconfigitem(
1238 b'experimental',
1238 b'experimental',
1239 b'treemanifest',
1239 b'treemanifest',
1240 default=False,
1240 default=False,
1241 )
1241 )
1242 coreconfigitem(
1242 coreconfigitem(
1243 b'experimental',
1243 b'experimental',
1244 b'update.atomic-file',
1244 b'update.atomic-file',
1245 default=False,
1245 default=False,
1246 )
1246 )
1247 coreconfigitem(
1247 coreconfigitem(
1248 b'experimental',
1248 b'experimental',
1249 b'sshpeer.advertise-v2',
1249 b'sshpeer.advertise-v2',
1250 default=False,
1250 default=False,
1251 )
1251 )
1252 coreconfigitem(
1252 coreconfigitem(
1253 b'experimental',
1253 b'experimental',
1254 b'web.apiserver',
1254 b'web.apiserver',
1255 default=False,
1255 default=False,
1256 )
1256 )
1257 coreconfigitem(
1257 coreconfigitem(
1258 b'experimental',
1258 b'experimental',
1259 b'web.api.http-v2',
1259 b'web.api.http-v2',
1260 default=False,
1260 default=False,
1261 )
1261 )
1262 coreconfigitem(
1262 coreconfigitem(
1263 b'experimental',
1263 b'experimental',
1264 b'web.api.debugreflect',
1264 b'web.api.debugreflect',
1265 default=False,
1265 default=False,
1266 )
1266 )
1267 coreconfigitem(
1267 coreconfigitem(
1268 b'experimental',
1268 b'experimental',
1269 b'worker.wdir-get-thread-safe',
1269 b'worker.wdir-get-thread-safe',
1270 default=False,
1270 default=False,
1271 )
1271 )
1272 coreconfigitem(
1272 coreconfigitem(
1273 b'experimental',
1273 b'experimental',
1274 b'worker.repository-upgrade',
1274 b'worker.repository-upgrade',
1275 default=False,
1275 default=False,
1276 )
1276 )
1277 coreconfigitem(
1277 coreconfigitem(
1278 b'experimental',
1278 b'experimental',
1279 b'xdiff',
1279 b'xdiff',
1280 default=False,
1280 default=False,
1281 )
1281 )
1282 coreconfigitem(
1282 coreconfigitem(
1283 b'extensions',
1283 b'extensions',
1284 b'.*',
1284 b'.*',
1285 default=None,
1285 default=None,
1286 generic=True,
1286 generic=True,
1287 )
1287 )
1288 coreconfigitem(
1288 coreconfigitem(
1289 b'extdata',
1289 b'extdata',
1290 b'.*',
1290 b'.*',
1291 default=None,
1291 default=None,
1292 generic=True,
1292 generic=True,
1293 )
1293 )
1294 coreconfigitem(
1294 coreconfigitem(
1295 b'format',
1295 b'format',
1296 b'bookmarks-in-store',
1296 b'bookmarks-in-store',
1297 default=False,
1297 default=False,
1298 )
1298 )
1299 coreconfigitem(
1299 coreconfigitem(
1300 b'format',
1300 b'format',
1301 b'chunkcachesize',
1301 b'chunkcachesize',
1302 default=None,
1302 default=None,
1303 experimental=True,
1303 experimental=True,
1304 )
1304 )
1305 coreconfigitem(
1305 coreconfigitem(
1306 b'format',
1306 b'format',
1307 b'dotencode',
1307 b'dotencode',
1308 default=True,
1308 default=True,
1309 )
1309 )
1310 coreconfigitem(
1310 coreconfigitem(
1311 b'format',
1311 b'format',
1312 b'generaldelta',
1312 b'generaldelta',
1313 default=False,
1313 default=False,
1314 experimental=True,
1314 experimental=True,
1315 )
1315 )
1316 coreconfigitem(
1316 coreconfigitem(
1317 b'format',
1317 b'format',
1318 b'manifestcachesize',
1318 b'manifestcachesize',
1319 default=None,
1319 default=None,
1320 experimental=True,
1320 experimental=True,
1321 )
1321 )
1322 coreconfigitem(
1322 coreconfigitem(
1323 b'format',
1323 b'format',
1324 b'maxchainlen',
1324 b'maxchainlen',
1325 default=dynamicdefault,
1325 default=dynamicdefault,
1326 experimental=True,
1326 experimental=True,
1327 )
1327 )
1328 coreconfigitem(
1328 coreconfigitem(
1329 b'format',
1329 b'format',
1330 b'obsstore-version',
1330 b'obsstore-version',
1331 default=None,
1331 default=None,
1332 )
1332 )
1333 coreconfigitem(
1333 coreconfigitem(
1334 b'format',
1334 b'format',
1335 b'sparse-revlog',
1335 b'sparse-revlog',
1336 default=True,
1336 default=True,
1337 )
1337 )
1338 coreconfigitem(
1338 coreconfigitem(
1339 b'format',
1339 b'format',
1340 b'revlog-compression',
1340 b'revlog-compression',
1341 default=lambda: [b'zstd', b'zlib'],
1341 default=lambda: [b'zstd', b'zlib'],
1342 alias=[(b'experimental', b'format.compression')],
1342 alias=[(b'experimental', b'format.compression')],
1343 )
1343 )
1344 # Experimental TODOs:
1344 # Experimental TODOs:
1345 #
1345 #
1346 # * Same as for evlogv2 (but for the reduction of the number of files)
1346 # * Same as for evlogv2 (but for the reduction of the number of files)
1347 # * drop the storage of the base
1348 # * Improvement to investigate
1347 # * Improvement to investigate
1349 # - storing .hgtags fnode
1348 # - storing .hgtags fnode
1350 # - storing `rank` of changesets
1349 # - storing `rank` of changesets
1351 # - storing branch related identifier
1350 # - storing branch related identifier
1352
1351
1353 coreconfigitem(
1352 coreconfigitem(
1354 b'format',
1353 b'format',
1355 b'exp-use-changelog-v2',
1354 b'exp-use-changelog-v2',
1356 default=None,
1355 default=None,
1357 experimental=True,
1356 experimental=True,
1358 )
1357 )
1359 coreconfigitem(
1358 coreconfigitem(
1360 b'format',
1359 b'format',
1361 b'usefncache',
1360 b'usefncache',
1362 default=True,
1361 default=True,
1363 )
1362 )
1364 coreconfigitem(
1363 coreconfigitem(
1365 b'format',
1364 b'format',
1366 b'usegeneraldelta',
1365 b'usegeneraldelta',
1367 default=True,
1366 default=True,
1368 )
1367 )
1369 coreconfigitem(
1368 coreconfigitem(
1370 b'format',
1369 b'format',
1371 b'usestore',
1370 b'usestore',
1372 default=True,
1371 default=True,
1373 )
1372 )
1374
1373
1375
1374
1376 def _persistent_nodemap_default():
1375 def _persistent_nodemap_default():
1377 """compute `use-persistent-nodemap` default value
1376 """compute `use-persistent-nodemap` default value
1378
1377
1379 The feature is disabled unless a fast implementation is available.
1378 The feature is disabled unless a fast implementation is available.
1380 """
1379 """
1381 from . import policy
1380 from . import policy
1382
1381
1383 return policy.importrust('revlog') is not None
1382 return policy.importrust('revlog') is not None
1384
1383
1385
1384
1386 coreconfigitem(
1385 coreconfigitem(
1387 b'format',
1386 b'format',
1388 b'use-persistent-nodemap',
1387 b'use-persistent-nodemap',
1389 default=_persistent_nodemap_default,
1388 default=_persistent_nodemap_default,
1390 )
1389 )
1391 coreconfigitem(
1390 coreconfigitem(
1392 b'format',
1391 b'format',
1393 b'exp-use-copies-side-data-changeset',
1392 b'exp-use-copies-side-data-changeset',
1394 default=False,
1393 default=False,
1395 experimental=True,
1394 experimental=True,
1396 )
1395 )
1397 coreconfigitem(
1396 coreconfigitem(
1398 b'format',
1397 b'format',
1399 b'use-share-safe',
1398 b'use-share-safe',
1400 default=False,
1399 default=False,
1401 )
1400 )
1402 coreconfigitem(
1401 coreconfigitem(
1403 b'format',
1402 b'format',
1404 b'internal-phase',
1403 b'internal-phase',
1405 default=False,
1404 default=False,
1406 experimental=True,
1405 experimental=True,
1407 )
1406 )
1408 coreconfigitem(
1407 coreconfigitem(
1409 b'fsmonitor',
1408 b'fsmonitor',
1410 b'warn_when_unused',
1409 b'warn_when_unused',
1411 default=True,
1410 default=True,
1412 )
1411 )
1413 coreconfigitem(
1412 coreconfigitem(
1414 b'fsmonitor',
1413 b'fsmonitor',
1415 b'warn_update_file_count',
1414 b'warn_update_file_count',
1416 default=50000,
1415 default=50000,
1417 )
1416 )
1418 coreconfigitem(
1417 coreconfigitem(
1419 b'fsmonitor',
1418 b'fsmonitor',
1420 b'warn_update_file_count_rust',
1419 b'warn_update_file_count_rust',
1421 default=400000,
1420 default=400000,
1422 )
1421 )
1423 coreconfigitem(
1422 coreconfigitem(
1424 b'help',
1423 b'help',
1425 br'hidden-command\..*',
1424 br'hidden-command\..*',
1426 default=False,
1425 default=False,
1427 generic=True,
1426 generic=True,
1428 )
1427 )
1429 coreconfigitem(
1428 coreconfigitem(
1430 b'help',
1429 b'help',
1431 br'hidden-topic\..*',
1430 br'hidden-topic\..*',
1432 default=False,
1431 default=False,
1433 generic=True,
1432 generic=True,
1434 )
1433 )
1435 coreconfigitem(
1434 coreconfigitem(
1436 b'hooks',
1435 b'hooks',
1437 b'[^:]*',
1436 b'[^:]*',
1438 default=dynamicdefault,
1437 default=dynamicdefault,
1439 generic=True,
1438 generic=True,
1440 )
1439 )
1441 coreconfigitem(
1440 coreconfigitem(
1442 b'hooks',
1441 b'hooks',
1443 b'.*:run-with-plain',
1442 b'.*:run-with-plain',
1444 default=True,
1443 default=True,
1445 generic=True,
1444 generic=True,
1446 )
1445 )
1447 coreconfigitem(
1446 coreconfigitem(
1448 b'hgweb-paths',
1447 b'hgweb-paths',
1449 b'.*',
1448 b'.*',
1450 default=list,
1449 default=list,
1451 generic=True,
1450 generic=True,
1452 )
1451 )
1453 coreconfigitem(
1452 coreconfigitem(
1454 b'hostfingerprints',
1453 b'hostfingerprints',
1455 b'.*',
1454 b'.*',
1456 default=list,
1455 default=list,
1457 generic=True,
1456 generic=True,
1458 )
1457 )
1459 coreconfigitem(
1458 coreconfigitem(
1460 b'hostsecurity',
1459 b'hostsecurity',
1461 b'ciphers',
1460 b'ciphers',
1462 default=None,
1461 default=None,
1463 )
1462 )
1464 coreconfigitem(
1463 coreconfigitem(
1465 b'hostsecurity',
1464 b'hostsecurity',
1466 b'minimumprotocol',
1465 b'minimumprotocol',
1467 default=dynamicdefault,
1466 default=dynamicdefault,
1468 )
1467 )
1469 coreconfigitem(
1468 coreconfigitem(
1470 b'hostsecurity',
1469 b'hostsecurity',
1471 b'.*:minimumprotocol$',
1470 b'.*:minimumprotocol$',
1472 default=dynamicdefault,
1471 default=dynamicdefault,
1473 generic=True,
1472 generic=True,
1474 )
1473 )
1475 coreconfigitem(
1474 coreconfigitem(
1476 b'hostsecurity',
1475 b'hostsecurity',
1477 b'.*:ciphers$',
1476 b'.*:ciphers$',
1478 default=dynamicdefault,
1477 default=dynamicdefault,
1479 generic=True,
1478 generic=True,
1480 )
1479 )
1481 coreconfigitem(
1480 coreconfigitem(
1482 b'hostsecurity',
1481 b'hostsecurity',
1483 b'.*:fingerprints$',
1482 b'.*:fingerprints$',
1484 default=list,
1483 default=list,
1485 generic=True,
1484 generic=True,
1486 )
1485 )
1487 coreconfigitem(
1486 coreconfigitem(
1488 b'hostsecurity',
1487 b'hostsecurity',
1489 b'.*:verifycertsfile$',
1488 b'.*:verifycertsfile$',
1490 default=None,
1489 default=None,
1491 generic=True,
1490 generic=True,
1492 )
1491 )
1493
1492
1494 coreconfigitem(
1493 coreconfigitem(
1495 b'http_proxy',
1494 b'http_proxy',
1496 b'always',
1495 b'always',
1497 default=False,
1496 default=False,
1498 )
1497 )
1499 coreconfigitem(
1498 coreconfigitem(
1500 b'http_proxy',
1499 b'http_proxy',
1501 b'host',
1500 b'host',
1502 default=None,
1501 default=None,
1503 )
1502 )
1504 coreconfigitem(
1503 coreconfigitem(
1505 b'http_proxy',
1504 b'http_proxy',
1506 b'no',
1505 b'no',
1507 default=list,
1506 default=list,
1508 )
1507 )
1509 coreconfigitem(
1508 coreconfigitem(
1510 b'http_proxy',
1509 b'http_proxy',
1511 b'passwd',
1510 b'passwd',
1512 default=None,
1511 default=None,
1513 )
1512 )
1514 coreconfigitem(
1513 coreconfigitem(
1515 b'http_proxy',
1514 b'http_proxy',
1516 b'user',
1515 b'user',
1517 default=None,
1516 default=None,
1518 )
1517 )
1519
1518
1520 coreconfigitem(
1519 coreconfigitem(
1521 b'http',
1520 b'http',
1522 b'timeout',
1521 b'timeout',
1523 default=None,
1522 default=None,
1524 )
1523 )
1525
1524
1526 coreconfigitem(
1525 coreconfigitem(
1527 b'logtoprocess',
1526 b'logtoprocess',
1528 b'commandexception',
1527 b'commandexception',
1529 default=None,
1528 default=None,
1530 )
1529 )
1531 coreconfigitem(
1530 coreconfigitem(
1532 b'logtoprocess',
1531 b'logtoprocess',
1533 b'commandfinish',
1532 b'commandfinish',
1534 default=None,
1533 default=None,
1535 )
1534 )
1536 coreconfigitem(
1535 coreconfigitem(
1537 b'logtoprocess',
1536 b'logtoprocess',
1538 b'command',
1537 b'command',
1539 default=None,
1538 default=None,
1540 )
1539 )
1541 coreconfigitem(
1540 coreconfigitem(
1542 b'logtoprocess',
1541 b'logtoprocess',
1543 b'develwarn',
1542 b'develwarn',
1544 default=None,
1543 default=None,
1545 )
1544 )
1546 coreconfigitem(
1545 coreconfigitem(
1547 b'logtoprocess',
1546 b'logtoprocess',
1548 b'uiblocked',
1547 b'uiblocked',
1549 default=None,
1548 default=None,
1550 )
1549 )
1551 coreconfigitem(
1550 coreconfigitem(
1552 b'merge',
1551 b'merge',
1553 b'checkunknown',
1552 b'checkunknown',
1554 default=b'abort',
1553 default=b'abort',
1555 )
1554 )
1556 coreconfigitem(
1555 coreconfigitem(
1557 b'merge',
1556 b'merge',
1558 b'checkignored',
1557 b'checkignored',
1559 default=b'abort',
1558 default=b'abort',
1560 )
1559 )
1561 coreconfigitem(
1560 coreconfigitem(
1562 b'experimental',
1561 b'experimental',
1563 b'merge.checkpathconflicts',
1562 b'merge.checkpathconflicts',
1564 default=False,
1563 default=False,
1565 )
1564 )
1566 coreconfigitem(
1565 coreconfigitem(
1567 b'merge',
1566 b'merge',
1568 b'followcopies',
1567 b'followcopies',
1569 default=True,
1568 default=True,
1570 )
1569 )
1571 coreconfigitem(
1570 coreconfigitem(
1572 b'merge',
1571 b'merge',
1573 b'on-failure',
1572 b'on-failure',
1574 default=b'continue',
1573 default=b'continue',
1575 )
1574 )
1576 coreconfigitem(
1575 coreconfigitem(
1577 b'merge',
1576 b'merge',
1578 b'preferancestor',
1577 b'preferancestor',
1579 default=lambda: [b'*'],
1578 default=lambda: [b'*'],
1580 experimental=True,
1579 experimental=True,
1581 )
1580 )
1582 coreconfigitem(
1581 coreconfigitem(
1583 b'merge',
1582 b'merge',
1584 b'strict-capability-check',
1583 b'strict-capability-check',
1585 default=False,
1584 default=False,
1586 )
1585 )
1587 coreconfigitem(
1586 coreconfigitem(
1588 b'merge-tools',
1587 b'merge-tools',
1589 b'.*',
1588 b'.*',
1590 default=None,
1589 default=None,
1591 generic=True,
1590 generic=True,
1592 )
1591 )
1593 coreconfigitem(
1592 coreconfigitem(
1594 b'merge-tools',
1593 b'merge-tools',
1595 br'.*\.args$',
1594 br'.*\.args$',
1596 default=b"$local $base $other",
1595 default=b"$local $base $other",
1597 generic=True,
1596 generic=True,
1598 priority=-1,
1597 priority=-1,
1599 )
1598 )
1600 coreconfigitem(
1599 coreconfigitem(
1601 b'merge-tools',
1600 b'merge-tools',
1602 br'.*\.binary$',
1601 br'.*\.binary$',
1603 default=False,
1602 default=False,
1604 generic=True,
1603 generic=True,
1605 priority=-1,
1604 priority=-1,
1606 )
1605 )
1607 coreconfigitem(
1606 coreconfigitem(
1608 b'merge-tools',
1607 b'merge-tools',
1609 br'.*\.check$',
1608 br'.*\.check$',
1610 default=list,
1609 default=list,
1611 generic=True,
1610 generic=True,
1612 priority=-1,
1611 priority=-1,
1613 )
1612 )
1614 coreconfigitem(
1613 coreconfigitem(
1615 b'merge-tools',
1614 b'merge-tools',
1616 br'.*\.checkchanged$',
1615 br'.*\.checkchanged$',
1617 default=False,
1616 default=False,
1618 generic=True,
1617 generic=True,
1619 priority=-1,
1618 priority=-1,
1620 )
1619 )
1621 coreconfigitem(
1620 coreconfigitem(
1622 b'merge-tools',
1621 b'merge-tools',
1623 br'.*\.executable$',
1622 br'.*\.executable$',
1624 default=dynamicdefault,
1623 default=dynamicdefault,
1625 generic=True,
1624 generic=True,
1626 priority=-1,
1625 priority=-1,
1627 )
1626 )
1628 coreconfigitem(
1627 coreconfigitem(
1629 b'merge-tools',
1628 b'merge-tools',
1630 br'.*\.fixeol$',
1629 br'.*\.fixeol$',
1631 default=False,
1630 default=False,
1632 generic=True,
1631 generic=True,
1633 priority=-1,
1632 priority=-1,
1634 )
1633 )
1635 coreconfigitem(
1634 coreconfigitem(
1636 b'merge-tools',
1635 b'merge-tools',
1637 br'.*\.gui$',
1636 br'.*\.gui$',
1638 default=False,
1637 default=False,
1639 generic=True,
1638 generic=True,
1640 priority=-1,
1639 priority=-1,
1641 )
1640 )
1642 coreconfigitem(
1641 coreconfigitem(
1643 b'merge-tools',
1642 b'merge-tools',
1644 br'.*\.mergemarkers$',
1643 br'.*\.mergemarkers$',
1645 default=b'basic',
1644 default=b'basic',
1646 generic=True,
1645 generic=True,
1647 priority=-1,
1646 priority=-1,
1648 )
1647 )
1649 coreconfigitem(
1648 coreconfigitem(
1650 b'merge-tools',
1649 b'merge-tools',
1651 br'.*\.mergemarkertemplate$',
1650 br'.*\.mergemarkertemplate$',
1652 default=dynamicdefault, # take from command-templates.mergemarker
1651 default=dynamicdefault, # take from command-templates.mergemarker
1653 generic=True,
1652 generic=True,
1654 priority=-1,
1653 priority=-1,
1655 )
1654 )
1656 coreconfigitem(
1655 coreconfigitem(
1657 b'merge-tools',
1656 b'merge-tools',
1658 br'.*\.priority$',
1657 br'.*\.priority$',
1659 default=0,
1658 default=0,
1660 generic=True,
1659 generic=True,
1661 priority=-1,
1660 priority=-1,
1662 )
1661 )
1663 coreconfigitem(
1662 coreconfigitem(
1664 b'merge-tools',
1663 b'merge-tools',
1665 br'.*\.premerge$',
1664 br'.*\.premerge$',
1666 default=dynamicdefault,
1665 default=dynamicdefault,
1667 generic=True,
1666 generic=True,
1668 priority=-1,
1667 priority=-1,
1669 )
1668 )
1670 coreconfigitem(
1669 coreconfigitem(
1671 b'merge-tools',
1670 b'merge-tools',
1672 br'.*\.symlink$',
1671 br'.*\.symlink$',
1673 default=False,
1672 default=False,
1674 generic=True,
1673 generic=True,
1675 priority=-1,
1674 priority=-1,
1676 )
1675 )
1677 coreconfigitem(
1676 coreconfigitem(
1678 b'pager',
1677 b'pager',
1679 b'attend-.*',
1678 b'attend-.*',
1680 default=dynamicdefault,
1679 default=dynamicdefault,
1681 generic=True,
1680 generic=True,
1682 )
1681 )
1683 coreconfigitem(
1682 coreconfigitem(
1684 b'pager',
1683 b'pager',
1685 b'ignore',
1684 b'ignore',
1686 default=list,
1685 default=list,
1687 )
1686 )
1688 coreconfigitem(
1687 coreconfigitem(
1689 b'pager',
1688 b'pager',
1690 b'pager',
1689 b'pager',
1691 default=dynamicdefault,
1690 default=dynamicdefault,
1692 )
1691 )
1693 coreconfigitem(
1692 coreconfigitem(
1694 b'patch',
1693 b'patch',
1695 b'eol',
1694 b'eol',
1696 default=b'strict',
1695 default=b'strict',
1697 )
1696 )
1698 coreconfigitem(
1697 coreconfigitem(
1699 b'patch',
1698 b'patch',
1700 b'fuzz',
1699 b'fuzz',
1701 default=2,
1700 default=2,
1702 )
1701 )
1703 coreconfigitem(
1702 coreconfigitem(
1704 b'paths',
1703 b'paths',
1705 b'default',
1704 b'default',
1706 default=None,
1705 default=None,
1707 )
1706 )
1708 coreconfigitem(
1707 coreconfigitem(
1709 b'paths',
1708 b'paths',
1710 b'default-push',
1709 b'default-push',
1711 default=None,
1710 default=None,
1712 )
1711 )
1713 coreconfigitem(
1712 coreconfigitem(
1714 b'paths',
1713 b'paths',
1715 b'.*',
1714 b'.*',
1716 default=None,
1715 default=None,
1717 generic=True,
1716 generic=True,
1718 )
1717 )
1719 coreconfigitem(
1718 coreconfigitem(
1720 b'phases',
1719 b'phases',
1721 b'checksubrepos',
1720 b'checksubrepos',
1722 default=b'follow',
1721 default=b'follow',
1723 )
1722 )
1724 coreconfigitem(
1723 coreconfigitem(
1725 b'phases',
1724 b'phases',
1726 b'new-commit',
1725 b'new-commit',
1727 default=b'draft',
1726 default=b'draft',
1728 )
1727 )
1729 coreconfigitem(
1728 coreconfigitem(
1730 b'phases',
1729 b'phases',
1731 b'publish',
1730 b'publish',
1732 default=True,
1731 default=True,
1733 )
1732 )
1734 coreconfigitem(
1733 coreconfigitem(
1735 b'profiling',
1734 b'profiling',
1736 b'enabled',
1735 b'enabled',
1737 default=False,
1736 default=False,
1738 )
1737 )
1739 coreconfigitem(
1738 coreconfigitem(
1740 b'profiling',
1739 b'profiling',
1741 b'format',
1740 b'format',
1742 default=b'text',
1741 default=b'text',
1743 )
1742 )
1744 coreconfigitem(
1743 coreconfigitem(
1745 b'profiling',
1744 b'profiling',
1746 b'freq',
1745 b'freq',
1747 default=1000,
1746 default=1000,
1748 )
1747 )
1749 coreconfigitem(
1748 coreconfigitem(
1750 b'profiling',
1749 b'profiling',
1751 b'limit',
1750 b'limit',
1752 default=30,
1751 default=30,
1753 )
1752 )
1754 coreconfigitem(
1753 coreconfigitem(
1755 b'profiling',
1754 b'profiling',
1756 b'nested',
1755 b'nested',
1757 default=0,
1756 default=0,
1758 )
1757 )
1759 coreconfigitem(
1758 coreconfigitem(
1760 b'profiling',
1759 b'profiling',
1761 b'output',
1760 b'output',
1762 default=None,
1761 default=None,
1763 )
1762 )
1764 coreconfigitem(
1763 coreconfigitem(
1765 b'profiling',
1764 b'profiling',
1766 b'showmax',
1765 b'showmax',
1767 default=0.999,
1766 default=0.999,
1768 )
1767 )
1769 coreconfigitem(
1768 coreconfigitem(
1770 b'profiling',
1769 b'profiling',
1771 b'showmin',
1770 b'showmin',
1772 default=dynamicdefault,
1771 default=dynamicdefault,
1773 )
1772 )
1774 coreconfigitem(
1773 coreconfigitem(
1775 b'profiling',
1774 b'profiling',
1776 b'showtime',
1775 b'showtime',
1777 default=True,
1776 default=True,
1778 )
1777 )
1779 coreconfigitem(
1778 coreconfigitem(
1780 b'profiling',
1779 b'profiling',
1781 b'sort',
1780 b'sort',
1782 default=b'inlinetime',
1781 default=b'inlinetime',
1783 )
1782 )
1784 coreconfigitem(
1783 coreconfigitem(
1785 b'profiling',
1784 b'profiling',
1786 b'statformat',
1785 b'statformat',
1787 default=b'hotpath',
1786 default=b'hotpath',
1788 )
1787 )
1789 coreconfigitem(
1788 coreconfigitem(
1790 b'profiling',
1789 b'profiling',
1791 b'time-track',
1790 b'time-track',
1792 default=dynamicdefault,
1791 default=dynamicdefault,
1793 )
1792 )
1794 coreconfigitem(
1793 coreconfigitem(
1795 b'profiling',
1794 b'profiling',
1796 b'type',
1795 b'type',
1797 default=b'stat',
1796 default=b'stat',
1798 )
1797 )
1799 coreconfigitem(
1798 coreconfigitem(
1800 b'progress',
1799 b'progress',
1801 b'assume-tty',
1800 b'assume-tty',
1802 default=False,
1801 default=False,
1803 )
1802 )
1804 coreconfigitem(
1803 coreconfigitem(
1805 b'progress',
1804 b'progress',
1806 b'changedelay',
1805 b'changedelay',
1807 default=1,
1806 default=1,
1808 )
1807 )
1809 coreconfigitem(
1808 coreconfigitem(
1810 b'progress',
1809 b'progress',
1811 b'clear-complete',
1810 b'clear-complete',
1812 default=True,
1811 default=True,
1813 )
1812 )
1814 coreconfigitem(
1813 coreconfigitem(
1815 b'progress',
1814 b'progress',
1816 b'debug',
1815 b'debug',
1817 default=False,
1816 default=False,
1818 )
1817 )
1819 coreconfigitem(
1818 coreconfigitem(
1820 b'progress',
1819 b'progress',
1821 b'delay',
1820 b'delay',
1822 default=3,
1821 default=3,
1823 )
1822 )
1824 coreconfigitem(
1823 coreconfigitem(
1825 b'progress',
1824 b'progress',
1826 b'disable',
1825 b'disable',
1827 default=False,
1826 default=False,
1828 )
1827 )
1829 coreconfigitem(
1828 coreconfigitem(
1830 b'progress',
1829 b'progress',
1831 b'estimateinterval',
1830 b'estimateinterval',
1832 default=60.0,
1831 default=60.0,
1833 )
1832 )
1834 coreconfigitem(
1833 coreconfigitem(
1835 b'progress',
1834 b'progress',
1836 b'format',
1835 b'format',
1837 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1836 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1838 )
1837 )
1839 coreconfigitem(
1838 coreconfigitem(
1840 b'progress',
1839 b'progress',
1841 b'refresh',
1840 b'refresh',
1842 default=0.1,
1841 default=0.1,
1843 )
1842 )
1844 coreconfigitem(
1843 coreconfigitem(
1845 b'progress',
1844 b'progress',
1846 b'width',
1845 b'width',
1847 default=dynamicdefault,
1846 default=dynamicdefault,
1848 )
1847 )
1849 coreconfigitem(
1848 coreconfigitem(
1850 b'pull',
1849 b'pull',
1851 b'confirm',
1850 b'confirm',
1852 default=False,
1851 default=False,
1853 )
1852 )
1854 coreconfigitem(
1853 coreconfigitem(
1855 b'push',
1854 b'push',
1856 b'pushvars.server',
1855 b'pushvars.server',
1857 default=False,
1856 default=False,
1858 )
1857 )
1859 coreconfigitem(
1858 coreconfigitem(
1860 b'rewrite',
1859 b'rewrite',
1861 b'backup-bundle',
1860 b'backup-bundle',
1862 default=True,
1861 default=True,
1863 alias=[(b'ui', b'history-editing-backup')],
1862 alias=[(b'ui', b'history-editing-backup')],
1864 )
1863 )
1865 coreconfigitem(
1864 coreconfigitem(
1866 b'rewrite',
1865 b'rewrite',
1867 b'update-timestamp',
1866 b'update-timestamp',
1868 default=False,
1867 default=False,
1869 )
1868 )
1870 coreconfigitem(
1869 coreconfigitem(
1871 b'rewrite',
1870 b'rewrite',
1872 b'empty-successor',
1871 b'empty-successor',
1873 default=b'skip',
1872 default=b'skip',
1874 experimental=True,
1873 experimental=True,
1875 )
1874 )
1876 coreconfigitem(
1875 coreconfigitem(
1877 b'storage',
1876 b'storage',
1878 b'new-repo-backend',
1877 b'new-repo-backend',
1879 default=b'revlogv1',
1878 default=b'revlogv1',
1880 experimental=True,
1879 experimental=True,
1881 )
1880 )
1882 coreconfigitem(
1881 coreconfigitem(
1883 b'storage',
1882 b'storage',
1884 b'revlog.optimize-delta-parent-choice',
1883 b'revlog.optimize-delta-parent-choice',
1885 default=True,
1884 default=True,
1886 alias=[(b'format', b'aggressivemergedeltas')],
1885 alias=[(b'format', b'aggressivemergedeltas')],
1887 )
1886 )
1888 # experimental as long as rust is experimental (or a C version is implemented)
1887 # experimental as long as rust is experimental (or a C version is implemented)
1889 coreconfigitem(
1888 coreconfigitem(
1890 b'storage',
1889 b'storage',
1891 b'revlog.persistent-nodemap.mmap',
1890 b'revlog.persistent-nodemap.mmap',
1892 default=True,
1891 default=True,
1893 )
1892 )
1894 # experimental as long as format.use-persistent-nodemap is.
1893 # experimental as long as format.use-persistent-nodemap is.
1895 coreconfigitem(
1894 coreconfigitem(
1896 b'storage',
1895 b'storage',
1897 b'revlog.persistent-nodemap.slow-path',
1896 b'revlog.persistent-nodemap.slow-path',
1898 default=b"abort",
1897 default=b"abort",
1899 )
1898 )
1900
1899
1901 coreconfigitem(
1900 coreconfigitem(
1902 b'storage',
1901 b'storage',
1903 b'revlog.reuse-external-delta',
1902 b'revlog.reuse-external-delta',
1904 default=True,
1903 default=True,
1905 )
1904 )
1906 coreconfigitem(
1905 coreconfigitem(
1907 b'storage',
1906 b'storage',
1908 b'revlog.reuse-external-delta-parent',
1907 b'revlog.reuse-external-delta-parent',
1909 default=None,
1908 default=None,
1910 )
1909 )
1911 coreconfigitem(
1910 coreconfigitem(
1912 b'storage',
1911 b'storage',
1913 b'revlog.zlib.level',
1912 b'revlog.zlib.level',
1914 default=None,
1913 default=None,
1915 )
1914 )
1916 coreconfigitem(
1915 coreconfigitem(
1917 b'storage',
1916 b'storage',
1918 b'revlog.zstd.level',
1917 b'revlog.zstd.level',
1919 default=None,
1918 default=None,
1920 )
1919 )
1921 coreconfigitem(
1920 coreconfigitem(
1922 b'server',
1921 b'server',
1923 b'bookmarks-pushkey-compat',
1922 b'bookmarks-pushkey-compat',
1924 default=True,
1923 default=True,
1925 )
1924 )
1926 coreconfigitem(
1925 coreconfigitem(
1927 b'server',
1926 b'server',
1928 b'bundle1',
1927 b'bundle1',
1929 default=True,
1928 default=True,
1930 )
1929 )
1931 coreconfigitem(
1930 coreconfigitem(
1932 b'server',
1931 b'server',
1933 b'bundle1gd',
1932 b'bundle1gd',
1934 default=None,
1933 default=None,
1935 )
1934 )
1936 coreconfigitem(
1935 coreconfigitem(
1937 b'server',
1936 b'server',
1938 b'bundle1.pull',
1937 b'bundle1.pull',
1939 default=None,
1938 default=None,
1940 )
1939 )
1941 coreconfigitem(
1940 coreconfigitem(
1942 b'server',
1941 b'server',
1943 b'bundle1gd.pull',
1942 b'bundle1gd.pull',
1944 default=None,
1943 default=None,
1945 )
1944 )
1946 coreconfigitem(
1945 coreconfigitem(
1947 b'server',
1946 b'server',
1948 b'bundle1.push',
1947 b'bundle1.push',
1949 default=None,
1948 default=None,
1950 )
1949 )
1951 coreconfigitem(
1950 coreconfigitem(
1952 b'server',
1951 b'server',
1953 b'bundle1gd.push',
1952 b'bundle1gd.push',
1954 default=None,
1953 default=None,
1955 )
1954 )
1956 coreconfigitem(
1955 coreconfigitem(
1957 b'server',
1956 b'server',
1958 b'bundle2.stream',
1957 b'bundle2.stream',
1959 default=True,
1958 default=True,
1960 alias=[(b'experimental', b'bundle2.stream')],
1959 alias=[(b'experimental', b'bundle2.stream')],
1961 )
1960 )
1962 coreconfigitem(
1961 coreconfigitem(
1963 b'server',
1962 b'server',
1964 b'compressionengines',
1963 b'compressionengines',
1965 default=list,
1964 default=list,
1966 )
1965 )
1967 coreconfigitem(
1966 coreconfigitem(
1968 b'server',
1967 b'server',
1969 b'concurrent-push-mode',
1968 b'concurrent-push-mode',
1970 default=b'check-related',
1969 default=b'check-related',
1971 )
1970 )
1972 coreconfigitem(
1971 coreconfigitem(
1973 b'server',
1972 b'server',
1974 b'disablefullbundle',
1973 b'disablefullbundle',
1975 default=False,
1974 default=False,
1976 )
1975 )
1977 coreconfigitem(
1976 coreconfigitem(
1978 b'server',
1977 b'server',
1979 b'maxhttpheaderlen',
1978 b'maxhttpheaderlen',
1980 default=1024,
1979 default=1024,
1981 )
1980 )
1982 coreconfigitem(
1981 coreconfigitem(
1983 b'server',
1982 b'server',
1984 b'pullbundle',
1983 b'pullbundle',
1985 default=False,
1984 default=False,
1986 )
1985 )
1987 coreconfigitem(
1986 coreconfigitem(
1988 b'server',
1987 b'server',
1989 b'preferuncompressed',
1988 b'preferuncompressed',
1990 default=False,
1989 default=False,
1991 )
1990 )
1992 coreconfigitem(
1991 coreconfigitem(
1993 b'server',
1992 b'server',
1994 b'streamunbundle',
1993 b'streamunbundle',
1995 default=False,
1994 default=False,
1996 )
1995 )
1997 coreconfigitem(
1996 coreconfigitem(
1998 b'server',
1997 b'server',
1999 b'uncompressed',
1998 b'uncompressed',
2000 default=True,
1999 default=True,
2001 )
2000 )
2002 coreconfigitem(
2001 coreconfigitem(
2003 b'server',
2002 b'server',
2004 b'uncompressedallowsecret',
2003 b'uncompressedallowsecret',
2005 default=False,
2004 default=False,
2006 )
2005 )
2007 coreconfigitem(
2006 coreconfigitem(
2008 b'server',
2007 b'server',
2009 b'view',
2008 b'view',
2010 default=b'served',
2009 default=b'served',
2011 )
2010 )
2012 coreconfigitem(
2011 coreconfigitem(
2013 b'server',
2012 b'server',
2014 b'validate',
2013 b'validate',
2015 default=False,
2014 default=False,
2016 )
2015 )
2017 coreconfigitem(
2016 coreconfigitem(
2018 b'server',
2017 b'server',
2019 b'zliblevel',
2018 b'zliblevel',
2020 default=-1,
2019 default=-1,
2021 )
2020 )
2022 coreconfigitem(
2021 coreconfigitem(
2023 b'server',
2022 b'server',
2024 b'zstdlevel',
2023 b'zstdlevel',
2025 default=3,
2024 default=3,
2026 )
2025 )
2027 coreconfigitem(
2026 coreconfigitem(
2028 b'share',
2027 b'share',
2029 b'pool',
2028 b'pool',
2030 default=None,
2029 default=None,
2031 )
2030 )
2032 coreconfigitem(
2031 coreconfigitem(
2033 b'share',
2032 b'share',
2034 b'poolnaming',
2033 b'poolnaming',
2035 default=b'identity',
2034 default=b'identity',
2036 )
2035 )
2037 coreconfigitem(
2036 coreconfigitem(
2038 b'share',
2037 b'share',
2039 b'safe-mismatch.source-not-safe',
2038 b'safe-mismatch.source-not-safe',
2040 default=b'abort',
2039 default=b'abort',
2041 )
2040 )
2042 coreconfigitem(
2041 coreconfigitem(
2043 b'share',
2042 b'share',
2044 b'safe-mismatch.source-safe',
2043 b'safe-mismatch.source-safe',
2045 default=b'abort',
2044 default=b'abort',
2046 )
2045 )
2047 coreconfigitem(
2046 coreconfigitem(
2048 b'share',
2047 b'share',
2049 b'safe-mismatch.source-not-safe.warn',
2048 b'safe-mismatch.source-not-safe.warn',
2050 default=True,
2049 default=True,
2051 )
2050 )
2052 coreconfigitem(
2051 coreconfigitem(
2053 b'share',
2052 b'share',
2054 b'safe-mismatch.source-safe.warn',
2053 b'safe-mismatch.source-safe.warn',
2055 default=True,
2054 default=True,
2056 )
2055 )
2057 coreconfigitem(
2056 coreconfigitem(
2058 b'shelve',
2057 b'shelve',
2059 b'maxbackups',
2058 b'maxbackups',
2060 default=10,
2059 default=10,
2061 )
2060 )
2062 coreconfigitem(
2061 coreconfigitem(
2063 b'smtp',
2062 b'smtp',
2064 b'host',
2063 b'host',
2065 default=None,
2064 default=None,
2066 )
2065 )
2067 coreconfigitem(
2066 coreconfigitem(
2068 b'smtp',
2067 b'smtp',
2069 b'local_hostname',
2068 b'local_hostname',
2070 default=None,
2069 default=None,
2071 )
2070 )
2072 coreconfigitem(
2071 coreconfigitem(
2073 b'smtp',
2072 b'smtp',
2074 b'password',
2073 b'password',
2075 default=None,
2074 default=None,
2076 )
2075 )
2077 coreconfigitem(
2076 coreconfigitem(
2078 b'smtp',
2077 b'smtp',
2079 b'port',
2078 b'port',
2080 default=dynamicdefault,
2079 default=dynamicdefault,
2081 )
2080 )
2082 coreconfigitem(
2081 coreconfigitem(
2083 b'smtp',
2082 b'smtp',
2084 b'tls',
2083 b'tls',
2085 default=b'none',
2084 default=b'none',
2086 )
2085 )
2087 coreconfigitem(
2086 coreconfigitem(
2088 b'smtp',
2087 b'smtp',
2089 b'username',
2088 b'username',
2090 default=None,
2089 default=None,
2091 )
2090 )
2092 coreconfigitem(
2091 coreconfigitem(
2093 b'sparse',
2092 b'sparse',
2094 b'missingwarning',
2093 b'missingwarning',
2095 default=True,
2094 default=True,
2096 experimental=True,
2095 experimental=True,
2097 )
2096 )
2098 coreconfigitem(
2097 coreconfigitem(
2099 b'subrepos',
2098 b'subrepos',
2100 b'allowed',
2099 b'allowed',
2101 default=dynamicdefault, # to make backporting simpler
2100 default=dynamicdefault, # to make backporting simpler
2102 )
2101 )
2103 coreconfigitem(
2102 coreconfigitem(
2104 b'subrepos',
2103 b'subrepos',
2105 b'hg:allowed',
2104 b'hg:allowed',
2106 default=dynamicdefault,
2105 default=dynamicdefault,
2107 )
2106 )
2108 coreconfigitem(
2107 coreconfigitem(
2109 b'subrepos',
2108 b'subrepos',
2110 b'git:allowed',
2109 b'git:allowed',
2111 default=dynamicdefault,
2110 default=dynamicdefault,
2112 )
2111 )
2113 coreconfigitem(
2112 coreconfigitem(
2114 b'subrepos',
2113 b'subrepos',
2115 b'svn:allowed',
2114 b'svn:allowed',
2116 default=dynamicdefault,
2115 default=dynamicdefault,
2117 )
2116 )
2118 coreconfigitem(
2117 coreconfigitem(
2119 b'templates',
2118 b'templates',
2120 b'.*',
2119 b'.*',
2121 default=None,
2120 default=None,
2122 generic=True,
2121 generic=True,
2123 )
2122 )
2124 coreconfigitem(
2123 coreconfigitem(
2125 b'templateconfig',
2124 b'templateconfig',
2126 b'.*',
2125 b'.*',
2127 default=dynamicdefault,
2126 default=dynamicdefault,
2128 generic=True,
2127 generic=True,
2129 )
2128 )
2130 coreconfigitem(
2129 coreconfigitem(
2131 b'trusted',
2130 b'trusted',
2132 b'groups',
2131 b'groups',
2133 default=list,
2132 default=list,
2134 )
2133 )
2135 coreconfigitem(
2134 coreconfigitem(
2136 b'trusted',
2135 b'trusted',
2137 b'users',
2136 b'users',
2138 default=list,
2137 default=list,
2139 )
2138 )
2140 coreconfigitem(
2139 coreconfigitem(
2141 b'ui',
2140 b'ui',
2142 b'_usedassubrepo',
2141 b'_usedassubrepo',
2143 default=False,
2142 default=False,
2144 )
2143 )
2145 coreconfigitem(
2144 coreconfigitem(
2146 b'ui',
2145 b'ui',
2147 b'allowemptycommit',
2146 b'allowemptycommit',
2148 default=False,
2147 default=False,
2149 )
2148 )
2150 coreconfigitem(
2149 coreconfigitem(
2151 b'ui',
2150 b'ui',
2152 b'archivemeta',
2151 b'archivemeta',
2153 default=True,
2152 default=True,
2154 )
2153 )
2155 coreconfigitem(
2154 coreconfigitem(
2156 b'ui',
2155 b'ui',
2157 b'askusername',
2156 b'askusername',
2158 default=False,
2157 default=False,
2159 )
2158 )
2160 coreconfigitem(
2159 coreconfigitem(
2161 b'ui',
2160 b'ui',
2162 b'available-memory',
2161 b'available-memory',
2163 default=None,
2162 default=None,
2164 )
2163 )
2165
2164
2166 coreconfigitem(
2165 coreconfigitem(
2167 b'ui',
2166 b'ui',
2168 b'clonebundlefallback',
2167 b'clonebundlefallback',
2169 default=False,
2168 default=False,
2170 )
2169 )
2171 coreconfigitem(
2170 coreconfigitem(
2172 b'ui',
2171 b'ui',
2173 b'clonebundleprefers',
2172 b'clonebundleprefers',
2174 default=list,
2173 default=list,
2175 )
2174 )
2176 coreconfigitem(
2175 coreconfigitem(
2177 b'ui',
2176 b'ui',
2178 b'clonebundles',
2177 b'clonebundles',
2179 default=True,
2178 default=True,
2180 )
2179 )
2181 coreconfigitem(
2180 coreconfigitem(
2182 b'ui',
2181 b'ui',
2183 b'color',
2182 b'color',
2184 default=b'auto',
2183 default=b'auto',
2185 )
2184 )
2186 coreconfigitem(
2185 coreconfigitem(
2187 b'ui',
2186 b'ui',
2188 b'commitsubrepos',
2187 b'commitsubrepos',
2189 default=False,
2188 default=False,
2190 )
2189 )
2191 coreconfigitem(
2190 coreconfigitem(
2192 b'ui',
2191 b'ui',
2193 b'debug',
2192 b'debug',
2194 default=False,
2193 default=False,
2195 )
2194 )
2196 coreconfigitem(
2195 coreconfigitem(
2197 b'ui',
2196 b'ui',
2198 b'debugger',
2197 b'debugger',
2199 default=None,
2198 default=None,
2200 )
2199 )
2201 coreconfigitem(
2200 coreconfigitem(
2202 b'ui',
2201 b'ui',
2203 b'editor',
2202 b'editor',
2204 default=dynamicdefault,
2203 default=dynamicdefault,
2205 )
2204 )
2206 coreconfigitem(
2205 coreconfigitem(
2207 b'ui',
2206 b'ui',
2208 b'detailed-exit-code',
2207 b'detailed-exit-code',
2209 default=False,
2208 default=False,
2210 experimental=True,
2209 experimental=True,
2211 )
2210 )
2212 coreconfigitem(
2211 coreconfigitem(
2213 b'ui',
2212 b'ui',
2214 b'fallbackencoding',
2213 b'fallbackencoding',
2215 default=None,
2214 default=None,
2216 )
2215 )
2217 coreconfigitem(
2216 coreconfigitem(
2218 b'ui',
2217 b'ui',
2219 b'forcecwd',
2218 b'forcecwd',
2220 default=None,
2219 default=None,
2221 )
2220 )
2222 coreconfigitem(
2221 coreconfigitem(
2223 b'ui',
2222 b'ui',
2224 b'forcemerge',
2223 b'forcemerge',
2225 default=None,
2224 default=None,
2226 )
2225 )
2227 coreconfigitem(
2226 coreconfigitem(
2228 b'ui',
2227 b'ui',
2229 b'formatdebug',
2228 b'formatdebug',
2230 default=False,
2229 default=False,
2231 )
2230 )
2232 coreconfigitem(
2231 coreconfigitem(
2233 b'ui',
2232 b'ui',
2234 b'formatjson',
2233 b'formatjson',
2235 default=False,
2234 default=False,
2236 )
2235 )
2237 coreconfigitem(
2236 coreconfigitem(
2238 b'ui',
2237 b'ui',
2239 b'formatted',
2238 b'formatted',
2240 default=None,
2239 default=None,
2241 )
2240 )
2242 coreconfigitem(
2241 coreconfigitem(
2243 b'ui',
2242 b'ui',
2244 b'interactive',
2243 b'interactive',
2245 default=None,
2244 default=None,
2246 )
2245 )
2247 coreconfigitem(
2246 coreconfigitem(
2248 b'ui',
2247 b'ui',
2249 b'interface',
2248 b'interface',
2250 default=None,
2249 default=None,
2251 )
2250 )
2252 coreconfigitem(
2251 coreconfigitem(
2253 b'ui',
2252 b'ui',
2254 b'interface.chunkselector',
2253 b'interface.chunkselector',
2255 default=None,
2254 default=None,
2256 )
2255 )
2257 coreconfigitem(
2256 coreconfigitem(
2258 b'ui',
2257 b'ui',
2259 b'large-file-limit',
2258 b'large-file-limit',
2260 default=10000000,
2259 default=10000000,
2261 )
2260 )
2262 coreconfigitem(
2261 coreconfigitem(
2263 b'ui',
2262 b'ui',
2264 b'logblockedtimes',
2263 b'logblockedtimes',
2265 default=False,
2264 default=False,
2266 )
2265 )
2267 coreconfigitem(
2266 coreconfigitem(
2268 b'ui',
2267 b'ui',
2269 b'merge',
2268 b'merge',
2270 default=None,
2269 default=None,
2271 )
2270 )
2272 coreconfigitem(
2271 coreconfigitem(
2273 b'ui',
2272 b'ui',
2274 b'mergemarkers',
2273 b'mergemarkers',
2275 default=b'basic',
2274 default=b'basic',
2276 )
2275 )
2277 coreconfigitem(
2276 coreconfigitem(
2278 b'ui',
2277 b'ui',
2279 b'message-output',
2278 b'message-output',
2280 default=b'stdio',
2279 default=b'stdio',
2281 )
2280 )
2282 coreconfigitem(
2281 coreconfigitem(
2283 b'ui',
2282 b'ui',
2284 b'nontty',
2283 b'nontty',
2285 default=False,
2284 default=False,
2286 )
2285 )
2287 coreconfigitem(
2286 coreconfigitem(
2288 b'ui',
2287 b'ui',
2289 b'origbackuppath',
2288 b'origbackuppath',
2290 default=None,
2289 default=None,
2291 )
2290 )
2292 coreconfigitem(
2291 coreconfigitem(
2293 b'ui',
2292 b'ui',
2294 b'paginate',
2293 b'paginate',
2295 default=True,
2294 default=True,
2296 )
2295 )
2297 coreconfigitem(
2296 coreconfigitem(
2298 b'ui',
2297 b'ui',
2299 b'patch',
2298 b'patch',
2300 default=None,
2299 default=None,
2301 )
2300 )
2302 coreconfigitem(
2301 coreconfigitem(
2303 b'ui',
2302 b'ui',
2304 b'portablefilenames',
2303 b'portablefilenames',
2305 default=b'warn',
2304 default=b'warn',
2306 )
2305 )
2307 coreconfigitem(
2306 coreconfigitem(
2308 b'ui',
2307 b'ui',
2309 b'promptecho',
2308 b'promptecho',
2310 default=False,
2309 default=False,
2311 )
2310 )
2312 coreconfigitem(
2311 coreconfigitem(
2313 b'ui',
2312 b'ui',
2314 b'quiet',
2313 b'quiet',
2315 default=False,
2314 default=False,
2316 )
2315 )
2317 coreconfigitem(
2316 coreconfigitem(
2318 b'ui',
2317 b'ui',
2319 b'quietbookmarkmove',
2318 b'quietbookmarkmove',
2320 default=False,
2319 default=False,
2321 )
2320 )
2322 coreconfigitem(
2321 coreconfigitem(
2323 b'ui',
2322 b'ui',
2324 b'relative-paths',
2323 b'relative-paths',
2325 default=b'legacy',
2324 default=b'legacy',
2326 )
2325 )
2327 coreconfigitem(
2326 coreconfigitem(
2328 b'ui',
2327 b'ui',
2329 b'remotecmd',
2328 b'remotecmd',
2330 default=b'hg',
2329 default=b'hg',
2331 )
2330 )
2332 coreconfigitem(
2331 coreconfigitem(
2333 b'ui',
2332 b'ui',
2334 b'report_untrusted',
2333 b'report_untrusted',
2335 default=True,
2334 default=True,
2336 )
2335 )
2337 coreconfigitem(
2336 coreconfigitem(
2338 b'ui',
2337 b'ui',
2339 b'rollback',
2338 b'rollback',
2340 default=True,
2339 default=True,
2341 )
2340 )
2342 coreconfigitem(
2341 coreconfigitem(
2343 b'ui',
2342 b'ui',
2344 b'signal-safe-lock',
2343 b'signal-safe-lock',
2345 default=True,
2344 default=True,
2346 )
2345 )
2347 coreconfigitem(
2346 coreconfigitem(
2348 b'ui',
2347 b'ui',
2349 b'slash',
2348 b'slash',
2350 default=False,
2349 default=False,
2351 )
2350 )
2352 coreconfigitem(
2351 coreconfigitem(
2353 b'ui',
2352 b'ui',
2354 b'ssh',
2353 b'ssh',
2355 default=b'ssh',
2354 default=b'ssh',
2356 )
2355 )
2357 coreconfigitem(
2356 coreconfigitem(
2358 b'ui',
2357 b'ui',
2359 b'ssherrorhint',
2358 b'ssherrorhint',
2360 default=None,
2359 default=None,
2361 )
2360 )
2362 coreconfigitem(
2361 coreconfigitem(
2363 b'ui',
2362 b'ui',
2364 b'statuscopies',
2363 b'statuscopies',
2365 default=False,
2364 default=False,
2366 )
2365 )
2367 coreconfigitem(
2366 coreconfigitem(
2368 b'ui',
2367 b'ui',
2369 b'strict',
2368 b'strict',
2370 default=False,
2369 default=False,
2371 )
2370 )
2372 coreconfigitem(
2371 coreconfigitem(
2373 b'ui',
2372 b'ui',
2374 b'style',
2373 b'style',
2375 default=b'',
2374 default=b'',
2376 )
2375 )
2377 coreconfigitem(
2376 coreconfigitem(
2378 b'ui',
2377 b'ui',
2379 b'supportcontact',
2378 b'supportcontact',
2380 default=None,
2379 default=None,
2381 )
2380 )
2382 coreconfigitem(
2381 coreconfigitem(
2383 b'ui',
2382 b'ui',
2384 b'textwidth',
2383 b'textwidth',
2385 default=78,
2384 default=78,
2386 )
2385 )
2387 coreconfigitem(
2386 coreconfigitem(
2388 b'ui',
2387 b'ui',
2389 b'timeout',
2388 b'timeout',
2390 default=b'600',
2389 default=b'600',
2391 )
2390 )
2392 coreconfigitem(
2391 coreconfigitem(
2393 b'ui',
2392 b'ui',
2394 b'timeout.warn',
2393 b'timeout.warn',
2395 default=0,
2394 default=0,
2396 )
2395 )
2397 coreconfigitem(
2396 coreconfigitem(
2398 b'ui',
2397 b'ui',
2399 b'timestamp-output',
2398 b'timestamp-output',
2400 default=False,
2399 default=False,
2401 )
2400 )
2402 coreconfigitem(
2401 coreconfigitem(
2403 b'ui',
2402 b'ui',
2404 b'traceback',
2403 b'traceback',
2405 default=False,
2404 default=False,
2406 )
2405 )
2407 coreconfigitem(
2406 coreconfigitem(
2408 b'ui',
2407 b'ui',
2409 b'tweakdefaults',
2408 b'tweakdefaults',
2410 default=False,
2409 default=False,
2411 )
2410 )
2412 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2411 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2413 coreconfigitem(
2412 coreconfigitem(
2414 b'ui',
2413 b'ui',
2415 b'verbose',
2414 b'verbose',
2416 default=False,
2415 default=False,
2417 )
2416 )
2418 coreconfigitem(
2417 coreconfigitem(
2419 b'verify',
2418 b'verify',
2420 b'skipflags',
2419 b'skipflags',
2421 default=None,
2420 default=None,
2422 )
2421 )
2423 coreconfigitem(
2422 coreconfigitem(
2424 b'web',
2423 b'web',
2425 b'allowbz2',
2424 b'allowbz2',
2426 default=False,
2425 default=False,
2427 )
2426 )
2428 coreconfigitem(
2427 coreconfigitem(
2429 b'web',
2428 b'web',
2430 b'allowgz',
2429 b'allowgz',
2431 default=False,
2430 default=False,
2432 )
2431 )
2433 coreconfigitem(
2432 coreconfigitem(
2434 b'web',
2433 b'web',
2435 b'allow-pull',
2434 b'allow-pull',
2436 alias=[(b'web', b'allowpull')],
2435 alias=[(b'web', b'allowpull')],
2437 default=True,
2436 default=True,
2438 )
2437 )
2439 coreconfigitem(
2438 coreconfigitem(
2440 b'web',
2439 b'web',
2441 b'allow-push',
2440 b'allow-push',
2442 alias=[(b'web', b'allow_push')],
2441 alias=[(b'web', b'allow_push')],
2443 default=list,
2442 default=list,
2444 )
2443 )
2445 coreconfigitem(
2444 coreconfigitem(
2446 b'web',
2445 b'web',
2447 b'allowzip',
2446 b'allowzip',
2448 default=False,
2447 default=False,
2449 )
2448 )
2450 coreconfigitem(
2449 coreconfigitem(
2451 b'web',
2450 b'web',
2452 b'archivesubrepos',
2451 b'archivesubrepos',
2453 default=False,
2452 default=False,
2454 )
2453 )
2455 coreconfigitem(
2454 coreconfigitem(
2456 b'web',
2455 b'web',
2457 b'cache',
2456 b'cache',
2458 default=True,
2457 default=True,
2459 )
2458 )
2460 coreconfigitem(
2459 coreconfigitem(
2461 b'web',
2460 b'web',
2462 b'comparisoncontext',
2461 b'comparisoncontext',
2463 default=5,
2462 default=5,
2464 )
2463 )
2465 coreconfigitem(
2464 coreconfigitem(
2466 b'web',
2465 b'web',
2467 b'contact',
2466 b'contact',
2468 default=None,
2467 default=None,
2469 )
2468 )
2470 coreconfigitem(
2469 coreconfigitem(
2471 b'web',
2470 b'web',
2472 b'deny_push',
2471 b'deny_push',
2473 default=list,
2472 default=list,
2474 )
2473 )
2475 coreconfigitem(
2474 coreconfigitem(
2476 b'web',
2475 b'web',
2477 b'guessmime',
2476 b'guessmime',
2478 default=False,
2477 default=False,
2479 )
2478 )
2480 coreconfigitem(
2479 coreconfigitem(
2481 b'web',
2480 b'web',
2482 b'hidden',
2481 b'hidden',
2483 default=False,
2482 default=False,
2484 )
2483 )
2485 coreconfigitem(
2484 coreconfigitem(
2486 b'web',
2485 b'web',
2487 b'labels',
2486 b'labels',
2488 default=list,
2487 default=list,
2489 )
2488 )
2490 coreconfigitem(
2489 coreconfigitem(
2491 b'web',
2490 b'web',
2492 b'logoimg',
2491 b'logoimg',
2493 default=b'hglogo.png',
2492 default=b'hglogo.png',
2494 )
2493 )
2495 coreconfigitem(
2494 coreconfigitem(
2496 b'web',
2495 b'web',
2497 b'logourl',
2496 b'logourl',
2498 default=b'https://mercurial-scm.org/',
2497 default=b'https://mercurial-scm.org/',
2499 )
2498 )
2500 coreconfigitem(
2499 coreconfigitem(
2501 b'web',
2500 b'web',
2502 b'accesslog',
2501 b'accesslog',
2503 default=b'-',
2502 default=b'-',
2504 )
2503 )
2505 coreconfigitem(
2504 coreconfigitem(
2506 b'web',
2505 b'web',
2507 b'address',
2506 b'address',
2508 default=b'',
2507 default=b'',
2509 )
2508 )
2510 coreconfigitem(
2509 coreconfigitem(
2511 b'web',
2510 b'web',
2512 b'allow-archive',
2511 b'allow-archive',
2513 alias=[(b'web', b'allow_archive')],
2512 alias=[(b'web', b'allow_archive')],
2514 default=list,
2513 default=list,
2515 )
2514 )
2516 coreconfigitem(
2515 coreconfigitem(
2517 b'web',
2516 b'web',
2518 b'allow_read',
2517 b'allow_read',
2519 default=list,
2518 default=list,
2520 )
2519 )
2521 coreconfigitem(
2520 coreconfigitem(
2522 b'web',
2521 b'web',
2523 b'baseurl',
2522 b'baseurl',
2524 default=None,
2523 default=None,
2525 )
2524 )
2526 coreconfigitem(
2525 coreconfigitem(
2527 b'web',
2526 b'web',
2528 b'cacerts',
2527 b'cacerts',
2529 default=None,
2528 default=None,
2530 )
2529 )
2531 coreconfigitem(
2530 coreconfigitem(
2532 b'web',
2531 b'web',
2533 b'certificate',
2532 b'certificate',
2534 default=None,
2533 default=None,
2535 )
2534 )
2536 coreconfigitem(
2535 coreconfigitem(
2537 b'web',
2536 b'web',
2538 b'collapse',
2537 b'collapse',
2539 default=False,
2538 default=False,
2540 )
2539 )
2541 coreconfigitem(
2540 coreconfigitem(
2542 b'web',
2541 b'web',
2543 b'csp',
2542 b'csp',
2544 default=None,
2543 default=None,
2545 )
2544 )
2546 coreconfigitem(
2545 coreconfigitem(
2547 b'web',
2546 b'web',
2548 b'deny_read',
2547 b'deny_read',
2549 default=list,
2548 default=list,
2550 )
2549 )
2551 coreconfigitem(
2550 coreconfigitem(
2552 b'web',
2551 b'web',
2553 b'descend',
2552 b'descend',
2554 default=True,
2553 default=True,
2555 )
2554 )
2556 coreconfigitem(
2555 coreconfigitem(
2557 b'web',
2556 b'web',
2558 b'description',
2557 b'description',
2559 default=b"",
2558 default=b"",
2560 )
2559 )
2561 coreconfigitem(
2560 coreconfigitem(
2562 b'web',
2561 b'web',
2563 b'encoding',
2562 b'encoding',
2564 default=lambda: encoding.encoding,
2563 default=lambda: encoding.encoding,
2565 )
2564 )
2566 coreconfigitem(
2565 coreconfigitem(
2567 b'web',
2566 b'web',
2568 b'errorlog',
2567 b'errorlog',
2569 default=b'-',
2568 default=b'-',
2570 )
2569 )
2571 coreconfigitem(
2570 coreconfigitem(
2572 b'web',
2571 b'web',
2573 b'ipv6',
2572 b'ipv6',
2574 default=False,
2573 default=False,
2575 )
2574 )
2576 coreconfigitem(
2575 coreconfigitem(
2577 b'web',
2576 b'web',
2578 b'maxchanges',
2577 b'maxchanges',
2579 default=10,
2578 default=10,
2580 )
2579 )
2581 coreconfigitem(
2580 coreconfigitem(
2582 b'web',
2581 b'web',
2583 b'maxfiles',
2582 b'maxfiles',
2584 default=10,
2583 default=10,
2585 )
2584 )
2586 coreconfigitem(
2585 coreconfigitem(
2587 b'web',
2586 b'web',
2588 b'maxshortchanges',
2587 b'maxshortchanges',
2589 default=60,
2588 default=60,
2590 )
2589 )
2591 coreconfigitem(
2590 coreconfigitem(
2592 b'web',
2591 b'web',
2593 b'motd',
2592 b'motd',
2594 default=b'',
2593 default=b'',
2595 )
2594 )
2596 coreconfigitem(
2595 coreconfigitem(
2597 b'web',
2596 b'web',
2598 b'name',
2597 b'name',
2599 default=dynamicdefault,
2598 default=dynamicdefault,
2600 )
2599 )
2601 coreconfigitem(
2600 coreconfigitem(
2602 b'web',
2601 b'web',
2603 b'port',
2602 b'port',
2604 default=8000,
2603 default=8000,
2605 )
2604 )
2606 coreconfigitem(
2605 coreconfigitem(
2607 b'web',
2606 b'web',
2608 b'prefix',
2607 b'prefix',
2609 default=b'',
2608 default=b'',
2610 )
2609 )
2611 coreconfigitem(
2610 coreconfigitem(
2612 b'web',
2611 b'web',
2613 b'push_ssl',
2612 b'push_ssl',
2614 default=True,
2613 default=True,
2615 )
2614 )
2616 coreconfigitem(
2615 coreconfigitem(
2617 b'web',
2616 b'web',
2618 b'refreshinterval',
2617 b'refreshinterval',
2619 default=20,
2618 default=20,
2620 )
2619 )
2621 coreconfigitem(
2620 coreconfigitem(
2622 b'web',
2621 b'web',
2623 b'server-header',
2622 b'server-header',
2624 default=None,
2623 default=None,
2625 )
2624 )
2626 coreconfigitem(
2625 coreconfigitem(
2627 b'web',
2626 b'web',
2628 b'static',
2627 b'static',
2629 default=None,
2628 default=None,
2630 )
2629 )
2631 coreconfigitem(
2630 coreconfigitem(
2632 b'web',
2631 b'web',
2633 b'staticurl',
2632 b'staticurl',
2634 default=None,
2633 default=None,
2635 )
2634 )
2636 coreconfigitem(
2635 coreconfigitem(
2637 b'web',
2636 b'web',
2638 b'stripes',
2637 b'stripes',
2639 default=1,
2638 default=1,
2640 )
2639 )
2641 coreconfigitem(
2640 coreconfigitem(
2642 b'web',
2641 b'web',
2643 b'style',
2642 b'style',
2644 default=b'paper',
2643 default=b'paper',
2645 )
2644 )
2646 coreconfigitem(
2645 coreconfigitem(
2647 b'web',
2646 b'web',
2648 b'templates',
2647 b'templates',
2649 default=None,
2648 default=None,
2650 )
2649 )
2651 coreconfigitem(
2650 coreconfigitem(
2652 b'web',
2651 b'web',
2653 b'view',
2652 b'view',
2654 default=b'served',
2653 default=b'served',
2655 experimental=True,
2654 experimental=True,
2656 )
2655 )
2657 coreconfigitem(
2656 coreconfigitem(
2658 b'worker',
2657 b'worker',
2659 b'backgroundclose',
2658 b'backgroundclose',
2660 default=dynamicdefault,
2659 default=dynamicdefault,
2661 )
2660 )
2662 # Windows defaults to a limit of 512 open files. A buffer of 128
2661 # Windows defaults to a limit of 512 open files. A buffer of 128
2663 # should give us enough headway.
2662 # should give us enough headway.
2664 coreconfigitem(
2663 coreconfigitem(
2665 b'worker',
2664 b'worker',
2666 b'backgroundclosemaxqueue',
2665 b'backgroundclosemaxqueue',
2667 default=384,
2666 default=384,
2668 )
2667 )
2669 coreconfigitem(
2668 coreconfigitem(
2670 b'worker',
2669 b'worker',
2671 b'backgroundcloseminfilecount',
2670 b'backgroundcloseminfilecount',
2672 default=2048,
2671 default=2048,
2673 )
2672 )
2674 coreconfigitem(
2673 coreconfigitem(
2675 b'worker',
2674 b'worker',
2676 b'backgroundclosethreadcount',
2675 b'backgroundclosethreadcount',
2677 default=4,
2676 default=4,
2678 )
2677 )
2679 coreconfigitem(
2678 coreconfigitem(
2680 b'worker',
2679 b'worker',
2681 b'enabled',
2680 b'enabled',
2682 default=True,
2681 default=True,
2683 )
2682 )
2684 coreconfigitem(
2683 coreconfigitem(
2685 b'worker',
2684 b'worker',
2686 b'numcpus',
2685 b'numcpus',
2687 default=None,
2686 default=None,
2688 )
2687 )
2689
2688
2690 # Rebase related configuration moved to core because other extension are doing
2689 # Rebase related configuration moved to core because other extension are doing
2691 # strange things. For example, shelve import the extensions to reuse some bit
2690 # strange things. For example, shelve import the extensions to reuse some bit
2692 # without formally loading it.
2691 # without formally loading it.
2693 coreconfigitem(
2692 coreconfigitem(
2694 b'commands',
2693 b'commands',
2695 b'rebase.requiredest',
2694 b'rebase.requiredest',
2696 default=False,
2695 default=False,
2697 )
2696 )
2698 coreconfigitem(
2697 coreconfigitem(
2699 b'experimental',
2698 b'experimental',
2700 b'rebaseskipobsolete',
2699 b'rebaseskipobsolete',
2701 default=True,
2700 default=True,
2702 )
2701 )
2703 coreconfigitem(
2702 coreconfigitem(
2704 b'rebase',
2703 b'rebase',
2705 b'singletransaction',
2704 b'singletransaction',
2706 default=False,
2705 default=False,
2707 )
2706 )
2708 coreconfigitem(
2707 coreconfigitem(
2709 b'rebase',
2708 b'rebase',
2710 b'experimental.inmemory',
2709 b'experimental.inmemory',
2711 default=False,
2710 default=False,
2712 )
2711 )
@@ -1,408 +1,432 b''
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11 import zlib
11 import zlib
12
12
13 from ..node import (
13 from ..node import (
14 nullrev,
14 nullrev,
15 sha1nodeconstants,
15 sha1nodeconstants,
16 )
16 )
17 from .. import (
17 from .. import (
18 error,
18 error,
19 pycompat,
19 pycompat,
20 util,
20 util,
21 )
21 )
22
22
23 from ..revlogutils import nodemap as nodemaputil
23 from ..revlogutils import nodemap as nodemaputil
24 from ..revlogutils import constants as revlog_constants
24 from ..revlogutils import constants as revlog_constants
25
25
26 stringio = pycompat.bytesio
26 stringio = pycompat.bytesio
27
27
28
28
29 _pack = struct.pack
29 _pack = struct.pack
30 _unpack = struct.unpack
30 _unpack = struct.unpack
31 _compress = zlib.compress
31 _compress = zlib.compress
32 _decompress = zlib.decompress
32 _decompress = zlib.decompress
33
33
34 # Some code below makes tuples directly because it's more convenient. However,
34 # Some code below makes tuples directly because it's more convenient. However,
35 # code outside this module should always use dirstatetuple.
35 # code outside this module should always use dirstatetuple.
36 def dirstatetuple(*x):
36 def dirstatetuple(*x):
37 # x is a tuple
37 # x is a tuple
38 return x
38 return x
39
39
40
40
41 def gettype(q):
41 def gettype(q):
42 return int(q & 0xFFFF)
42 return int(q & 0xFFFF)
43
43
44
44
45 def offset_type(offset, type):
45 def offset_type(offset, type):
46 return int(int(offset) << 16 | type)
46 return int(int(offset) << 16 | type)
47
47
48
48
49 class BaseIndexObject(object):
49 class BaseIndexObject(object):
50 # Can I be passed to an algorithme implemented in Rust ?
50 # Can I be passed to an algorithme implemented in Rust ?
51 rust_ext_compat = 0
51 rust_ext_compat = 0
52 # Format of an index entry according to Python's `struct` language
52 # Format of an index entry according to Python's `struct` language
53 index_format = revlog_constants.INDEX_ENTRY_V1
53 index_format = revlog_constants.INDEX_ENTRY_V1
54 # Size of a C unsigned long long int, platform independent
54 # Size of a C unsigned long long int, platform independent
55 big_int_size = struct.calcsize(b'>Q')
55 big_int_size = struct.calcsize(b'>Q')
56 # Size of a C long int, platform independent
56 # Size of a C long int, platform independent
57 int_size = struct.calcsize(b'>i')
57 int_size = struct.calcsize(b'>i')
58 # An empty index entry, used as a default value to be overridden, or nullrev
58 # An empty index entry, used as a default value to be overridden, or nullrev
59 null_item = (
59 null_item = (
60 0,
60 0,
61 0,
61 0,
62 0,
62 0,
63 -1,
63 -1,
64 -1,
64 -1,
65 -1,
65 -1,
66 -1,
66 -1,
67 sha1nodeconstants.nullid,
67 sha1nodeconstants.nullid,
68 0,
68 0,
69 0,
69 0,
70 revlog_constants.COMP_MODE_INLINE,
70 revlog_constants.COMP_MODE_INLINE,
71 revlog_constants.COMP_MODE_INLINE,
71 revlog_constants.COMP_MODE_INLINE,
72 )
72 )
73
73
74 @util.propertycache
74 @util.propertycache
75 def entry_size(self):
75 def entry_size(self):
76 return self.index_format.size
76 return self.index_format.size
77
77
78 @property
78 @property
79 def nodemap(self):
79 def nodemap(self):
80 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
80 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
81 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
81 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
82 return self._nodemap
82 return self._nodemap
83
83
84 @util.propertycache
84 @util.propertycache
85 def _nodemap(self):
85 def _nodemap(self):
86 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
86 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
87 for r in range(0, len(self)):
87 for r in range(0, len(self)):
88 n = self[r][7]
88 n = self[r][7]
89 nodemap[n] = r
89 nodemap[n] = r
90 return nodemap
90 return nodemap
91
91
92 def has_node(self, node):
92 def has_node(self, node):
93 """return True if the node exist in the index"""
93 """return True if the node exist in the index"""
94 return node in self._nodemap
94 return node in self._nodemap
95
95
96 def rev(self, node):
96 def rev(self, node):
97 """return a revision for a node
97 """return a revision for a node
98
98
99 If the node is unknown, raise a RevlogError"""
99 If the node is unknown, raise a RevlogError"""
100 return self._nodemap[node]
100 return self._nodemap[node]
101
101
102 def get_rev(self, node):
102 def get_rev(self, node):
103 """return a revision for a node
103 """return a revision for a node
104
104
105 If the node is unknown, return None"""
105 If the node is unknown, return None"""
106 return self._nodemap.get(node)
106 return self._nodemap.get(node)
107
107
108 def _stripnodes(self, start):
108 def _stripnodes(self, start):
109 if '_nodemap' in vars(self):
109 if '_nodemap' in vars(self):
110 for r in range(start, len(self)):
110 for r in range(start, len(self)):
111 n = self[r][7]
111 n = self[r][7]
112 del self._nodemap[n]
112 del self._nodemap[n]
113
113
114 def clearcaches(self):
114 def clearcaches(self):
115 self.__dict__.pop('_nodemap', None)
115 self.__dict__.pop('_nodemap', None)
116
116
117 def __len__(self):
117 def __len__(self):
118 return self._lgt + len(self._extra)
118 return self._lgt + len(self._extra)
119
119
120 def append(self, tup):
120 def append(self, tup):
121 if '_nodemap' in vars(self):
121 if '_nodemap' in vars(self):
122 self._nodemap[tup[7]] = len(self)
122 self._nodemap[tup[7]] = len(self)
123 data = self._pack_entry(len(self), tup)
123 data = self._pack_entry(len(self), tup)
124 self._extra.append(data)
124 self._extra.append(data)
125
125
126 def _pack_entry(self, rev, entry):
126 def _pack_entry(self, rev, entry):
127 assert entry[8] == 0
127 assert entry[8] == 0
128 assert entry[9] == 0
128 assert entry[9] == 0
129 return self.index_format.pack(*entry[:8])
129 return self.index_format.pack(*entry[:8])
130
130
131 def _check_index(self, i):
131 def _check_index(self, i):
132 if not isinstance(i, int):
132 if not isinstance(i, int):
133 raise TypeError(b"expecting int indexes")
133 raise TypeError(b"expecting int indexes")
134 if i < 0 or i >= len(self):
134 if i < 0 or i >= len(self):
135 raise IndexError
135 raise IndexError
136
136
137 def __getitem__(self, i):
137 def __getitem__(self, i):
138 if i == -1:
138 if i == -1:
139 return self.null_item
139 return self.null_item
140 self._check_index(i)
140 self._check_index(i)
141 if i >= self._lgt:
141 if i >= self._lgt:
142 data = self._extra[i - self._lgt]
142 data = self._extra[i - self._lgt]
143 else:
143 else:
144 index = self._calculate_index(i)
144 index = self._calculate_index(i)
145 data = self._data[index : index + self.entry_size]
145 data = self._data[index : index + self.entry_size]
146 r = self._unpack_entry(i, data)
146 r = self._unpack_entry(i, data)
147 if self._lgt and i == 0:
147 if self._lgt and i == 0:
148 r = (offset_type(0, gettype(r[0])),) + r[1:]
148 r = (offset_type(0, gettype(r[0])),) + r[1:]
149 return r
149 return r
150
150
151 def _unpack_entry(self, rev, data):
151 def _unpack_entry(self, rev, data):
152 r = self.index_format.unpack(data)
152 r = self.index_format.unpack(data)
153 r = r + (
153 r = r + (
154 0,
154 0,
155 0,
155 0,
156 revlog_constants.COMP_MODE_INLINE,
156 revlog_constants.COMP_MODE_INLINE,
157 revlog_constants.COMP_MODE_INLINE,
157 revlog_constants.COMP_MODE_INLINE,
158 )
158 )
159 return r
159 return r
160
160
161 def pack_header(self, header):
161 def pack_header(self, header):
162 """pack header information as binary"""
162 """pack header information as binary"""
163 v_fmt = revlog_constants.INDEX_HEADER
163 v_fmt = revlog_constants.INDEX_HEADER
164 return v_fmt.pack(header)
164 return v_fmt.pack(header)
165
165
166 def entry_binary(self, rev):
166 def entry_binary(self, rev):
167 """return the raw binary string representing a revision"""
167 """return the raw binary string representing a revision"""
168 entry = self[rev]
168 entry = self[rev]
169 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
169 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
170 if rev == 0:
170 if rev == 0:
171 p = p[revlog_constants.INDEX_HEADER.size :]
171 p = p[revlog_constants.INDEX_HEADER.size :]
172 return p
172 return p
173
173
174
174
175 class IndexObject(BaseIndexObject):
175 class IndexObject(BaseIndexObject):
176 def __init__(self, data):
176 def __init__(self, data):
177 assert len(data) % self.entry_size == 0, (
177 assert len(data) % self.entry_size == 0, (
178 len(data),
178 len(data),
179 self.entry_size,
179 self.entry_size,
180 len(data) % self.entry_size,
180 len(data) % self.entry_size,
181 )
181 )
182 self._data = data
182 self._data = data
183 self._lgt = len(data) // self.entry_size
183 self._lgt = len(data) // self.entry_size
184 self._extra = []
184 self._extra = []
185
185
186 def _calculate_index(self, i):
186 def _calculate_index(self, i):
187 return i * self.entry_size
187 return i * self.entry_size
188
188
189 def __delitem__(self, i):
189 def __delitem__(self, i):
190 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
190 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
191 raise ValueError(b"deleting slices only supports a:-1 with step 1")
191 raise ValueError(b"deleting slices only supports a:-1 with step 1")
192 i = i.start
192 i = i.start
193 self._check_index(i)
193 self._check_index(i)
194 self._stripnodes(i)
194 self._stripnodes(i)
195 if i < self._lgt:
195 if i < self._lgt:
196 self._data = self._data[: i * self.entry_size]
196 self._data = self._data[: i * self.entry_size]
197 self._lgt = i
197 self._lgt = i
198 self._extra = []
198 self._extra = []
199 else:
199 else:
200 self._extra = self._extra[: i - self._lgt]
200 self._extra = self._extra[: i - self._lgt]
201
201
202
202
203 class PersistentNodeMapIndexObject(IndexObject):
203 class PersistentNodeMapIndexObject(IndexObject):
204 """a Debug oriented class to test persistent nodemap
204 """a Debug oriented class to test persistent nodemap
205
205
206 We need a simple python object to test API and higher level behavior. See
206 We need a simple python object to test API and higher level behavior. See
207 the Rust implementation for more serious usage. This should be used only
207 the Rust implementation for more serious usage. This should be used only
208 through the dedicated `devel.persistent-nodemap` config.
208 through the dedicated `devel.persistent-nodemap` config.
209 """
209 """
210
210
211 def nodemap_data_all(self):
211 def nodemap_data_all(self):
212 """Return bytes containing a full serialization of a nodemap
212 """Return bytes containing a full serialization of a nodemap
213
213
214 The nodemap should be valid for the full set of revisions in the
214 The nodemap should be valid for the full set of revisions in the
215 index."""
215 index."""
216 return nodemaputil.persistent_data(self)
216 return nodemaputil.persistent_data(self)
217
217
218 def nodemap_data_incremental(self):
218 def nodemap_data_incremental(self):
219 """Return bytes containing a incremental update to persistent nodemap
219 """Return bytes containing a incremental update to persistent nodemap
220
220
221 This containst the data for an append-only update of the data provided
221 This containst the data for an append-only update of the data provided
222 in the last call to `update_nodemap_data`.
222 in the last call to `update_nodemap_data`.
223 """
223 """
224 if self._nm_root is None:
224 if self._nm_root is None:
225 return None
225 return None
226 docket = self._nm_docket
226 docket = self._nm_docket
227 changed, data = nodemaputil.update_persistent_data(
227 changed, data = nodemaputil.update_persistent_data(
228 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
228 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
229 )
229 )
230
230
231 self._nm_root = self._nm_max_idx = self._nm_docket = None
231 self._nm_root = self._nm_max_idx = self._nm_docket = None
232 return docket, changed, data
232 return docket, changed, data
233
233
234 def update_nodemap_data(self, docket, nm_data):
234 def update_nodemap_data(self, docket, nm_data):
235 """provide full block of persisted binary data for a nodemap
235 """provide full block of persisted binary data for a nodemap
236
236
237 The data are expected to come from disk. See `nodemap_data_all` for a
237 The data are expected to come from disk. See `nodemap_data_all` for a
238 produceur of such data."""
238 produceur of such data."""
239 if nm_data is not None:
239 if nm_data is not None:
240 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
240 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
241 if self._nm_root:
241 if self._nm_root:
242 self._nm_docket = docket
242 self._nm_docket = docket
243 else:
243 else:
244 self._nm_root = self._nm_max_idx = self._nm_docket = None
244 self._nm_root = self._nm_max_idx = self._nm_docket = None
245
245
246
246
247 class InlinedIndexObject(BaseIndexObject):
247 class InlinedIndexObject(BaseIndexObject):
248 def __init__(self, data, inline=0):
248 def __init__(self, data, inline=0):
249 self._data = data
249 self._data = data
250 self._lgt = self._inline_scan(None)
250 self._lgt = self._inline_scan(None)
251 self._inline_scan(self._lgt)
251 self._inline_scan(self._lgt)
252 self._extra = []
252 self._extra = []
253
253
254 def _inline_scan(self, lgt):
254 def _inline_scan(self, lgt):
255 off = 0
255 off = 0
256 if lgt is not None:
256 if lgt is not None:
257 self._offsets = [0] * lgt
257 self._offsets = [0] * lgt
258 count = 0
258 count = 0
259 while off <= len(self._data) - self.entry_size:
259 while off <= len(self._data) - self.entry_size:
260 start = off + self.big_int_size
260 start = off + self.big_int_size
261 (s,) = struct.unpack(
261 (s,) = struct.unpack(
262 b'>i',
262 b'>i',
263 self._data[start : start + self.int_size],
263 self._data[start : start + self.int_size],
264 )
264 )
265 if lgt is not None:
265 if lgt is not None:
266 self._offsets[count] = off
266 self._offsets[count] = off
267 count += 1
267 count += 1
268 off += self.entry_size + s
268 off += self.entry_size + s
269 if off != len(self._data):
269 if off != len(self._data):
270 raise ValueError(b"corrupted data")
270 raise ValueError(b"corrupted data")
271 return count
271 return count
272
272
273 def __delitem__(self, i):
273 def __delitem__(self, i):
274 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
274 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
275 raise ValueError(b"deleting slices only supports a:-1 with step 1")
275 raise ValueError(b"deleting slices only supports a:-1 with step 1")
276 i = i.start
276 i = i.start
277 self._check_index(i)
277 self._check_index(i)
278 self._stripnodes(i)
278 self._stripnodes(i)
279 if i < self._lgt:
279 if i < self._lgt:
280 self._offsets = self._offsets[:i]
280 self._offsets = self._offsets[:i]
281 self._lgt = i
281 self._lgt = i
282 self._extra = []
282 self._extra = []
283 else:
283 else:
284 self._extra = self._extra[: i - self._lgt]
284 self._extra = self._extra[: i - self._lgt]
285
285
286 def _calculate_index(self, i):
286 def _calculate_index(self, i):
287 return self._offsets[i]
287 return self._offsets[i]
288
288
289
289
290 def parse_index2(data, inline, revlogv2=False):
290 def parse_index2(data, inline, revlogv2=False):
291 if not inline:
291 if not inline:
292 cls = IndexObject2 if revlogv2 else IndexObject
292 cls = IndexObject2 if revlogv2 else IndexObject
293 return cls(data), None
293 return cls(data), None
294 cls = InlinedIndexObject
294 cls = InlinedIndexObject
295 return cls(data, inline), (0, data)
295 return cls(data, inline), (0, data)
296
296
297
297
298 def parse_index_cl_v2(data):
299 return IndexChangelogV2(data), None
300
301
298 class IndexObject2(IndexObject):
302 class IndexObject2(IndexObject):
299 index_format = revlog_constants.INDEX_ENTRY_V2
303 index_format = revlog_constants.INDEX_ENTRY_V2
300
304
301 def replace_sidedata_info(
305 def replace_sidedata_info(
302 self,
306 self,
303 rev,
307 rev,
304 sidedata_offset,
308 sidedata_offset,
305 sidedata_length,
309 sidedata_length,
306 offset_flags,
310 offset_flags,
307 compression_mode,
311 compression_mode,
308 ):
312 ):
309 """
313 """
310 Replace an existing index entry's sidedata offset and length with new
314 Replace an existing index entry's sidedata offset and length with new
311 ones.
315 ones.
312 This cannot be used outside of the context of sidedata rewriting,
316 This cannot be used outside of the context of sidedata rewriting,
313 inside the transaction that creates the revision `rev`.
317 inside the transaction that creates the revision `rev`.
314 """
318 """
315 if rev < 0:
319 if rev < 0:
316 raise KeyError
320 raise KeyError
317 self._check_index(rev)
321 self._check_index(rev)
318 if rev < self._lgt:
322 if rev < self._lgt:
319 msg = b"cannot rewrite entries outside of this transaction"
323 msg = b"cannot rewrite entries outside of this transaction"
320 raise KeyError(msg)
324 raise KeyError(msg)
321 else:
325 else:
322 entry = list(self[rev])
326 entry = list(self[rev])
323 entry[0] = offset_flags
327 entry[0] = offset_flags
324 entry[8] = sidedata_offset
328 entry[8] = sidedata_offset
325 entry[9] = sidedata_length
329 entry[9] = sidedata_length
326 entry[11] = compression_mode
330 entry[11] = compression_mode
327 entry = tuple(entry)
331 entry = tuple(entry)
328 new = self._pack_entry(rev, entry)
332 new = self._pack_entry(rev, entry)
329 self._extra[rev - self._lgt] = new
333 self._extra[rev - self._lgt] = new
330
334
331 def _unpack_entry(self, rev, data):
335 def _unpack_entry(self, rev, data):
332 data = self.index_format.unpack(data)
336 data = self.index_format.unpack(data)
333 entry = data[:10]
337 entry = data[:10]
334 data_comp = data[10] & 3
338 data_comp = data[10] & 3
335 sidedata_comp = (data[10] & (3 << 2)) >> 2
339 sidedata_comp = (data[10] & (3 << 2)) >> 2
336 return entry + (data_comp, sidedata_comp)
340 return entry + (data_comp, sidedata_comp)
337
341
338 def _pack_entry(self, rev, entry):
342 def _pack_entry(self, rev, entry):
339 data = entry[:10]
343 data = entry[:10]
340 data_comp = entry[10] & 3
344 data_comp = entry[10] & 3
341 sidedata_comp = (entry[11] & 3) << 2
345 sidedata_comp = (entry[11] & 3) << 2
342 data += (data_comp | sidedata_comp,)
346 data += (data_comp | sidedata_comp,)
343
347
344 return self.index_format.pack(*data)
348 return self.index_format.pack(*data)
345
349
346 def entry_binary(self, rev):
350 def entry_binary(self, rev):
347 """return the raw binary string representing a revision"""
351 """return the raw binary string representing a revision"""
348 entry = self[rev]
352 entry = self[rev]
349 return self._pack_entry(rev, entry)
353 return self._pack_entry(rev, entry)
350
354
351 def pack_header(self, header):
355 def pack_header(self, header):
352 """pack header information as binary"""
356 """pack header information as binary"""
353 msg = 'version header should go in the docket, not the index: %d'
357 msg = 'version header should go in the docket, not the index: %d'
354 msg %= header
358 msg %= header
355 raise error.ProgrammingError(msg)
359 raise error.ProgrammingError(msg)
356
360
357
361
362 class IndexChangelogV2(IndexObject2):
363 index_format = revlog_constants.INDEX_ENTRY_CL_V2
364
365 def _unpack_entry(self, rev, data, r=True):
366 items = self.index_format.unpack(data)
367 entry = items[:3] + (rev, rev) + items[3:8]
368 data_comp = items[8] & 3
369 sidedata_comp = (items[8] >> 2) & 3
370 return entry + (data_comp, sidedata_comp)
371
372 def _pack_entry(self, rev, entry):
373 assert entry[3] == rev, entry[3]
374 assert entry[4] == rev, entry[4]
375 data = entry[:3] + entry[5:10]
376 data_comp = entry[10] & 3
377 sidedata_comp = (entry[11] & 3) << 2
378 data += (data_comp | sidedata_comp,)
379 return self.index_format.pack(*data)
380
381
358 def parse_index_devel_nodemap(data, inline):
382 def parse_index_devel_nodemap(data, inline):
359 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
383 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
360 return PersistentNodeMapIndexObject(data), None
384 return PersistentNodeMapIndexObject(data), None
361
385
362
386
363 def parse_dirstate(dmap, copymap, st):
387 def parse_dirstate(dmap, copymap, st):
364 parents = [st[:20], st[20:40]]
388 parents = [st[:20], st[20:40]]
365 # dereference fields so they will be local in loop
389 # dereference fields so they will be local in loop
366 format = b">cllll"
390 format = b">cllll"
367 e_size = struct.calcsize(format)
391 e_size = struct.calcsize(format)
368 pos1 = 40
392 pos1 = 40
369 l = len(st)
393 l = len(st)
370
394
371 # the inner loop
395 # the inner loop
372 while pos1 < l:
396 while pos1 < l:
373 pos2 = pos1 + e_size
397 pos2 = pos1 + e_size
374 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
398 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
375 pos1 = pos2 + e[4]
399 pos1 = pos2 + e[4]
376 f = st[pos2:pos1]
400 f = st[pos2:pos1]
377 if b'\0' in f:
401 if b'\0' in f:
378 f, c = f.split(b'\0')
402 f, c = f.split(b'\0')
379 copymap[f] = c
403 copymap[f] = c
380 dmap[f] = e[:4]
404 dmap[f] = e[:4]
381 return parents
405 return parents
382
406
383
407
384 def pack_dirstate(dmap, copymap, pl, now):
408 def pack_dirstate(dmap, copymap, pl, now):
385 now = int(now)
409 now = int(now)
386 cs = stringio()
410 cs = stringio()
387 write = cs.write
411 write = cs.write
388 write(b"".join(pl))
412 write(b"".join(pl))
389 for f, e in pycompat.iteritems(dmap):
413 for f, e in pycompat.iteritems(dmap):
390 if e[0] == b'n' and e[3] == now:
414 if e[0] == b'n' and e[3] == now:
391 # The file was last modified "simultaneously" with the current
415 # The file was last modified "simultaneously" with the current
392 # write to dirstate (i.e. within the same second for file-
416 # write to dirstate (i.e. within the same second for file-
393 # systems with a granularity of 1 sec). This commonly happens
417 # systems with a granularity of 1 sec). This commonly happens
394 # for at least a couple of files on 'update'.
418 # for at least a couple of files on 'update'.
395 # The user could change the file without changing its size
419 # The user could change the file without changing its size
396 # within the same second. Invalidate the file's mtime in
420 # within the same second. Invalidate the file's mtime in
397 # dirstate, forcing future 'status' calls to compare the
421 # dirstate, forcing future 'status' calls to compare the
398 # contents of the file if the size is the same. This prevents
422 # contents of the file if the size is the same. This prevents
399 # mistakenly treating such files as clean.
423 # mistakenly treating such files as clean.
400 e = dirstatetuple(e[0], e[1], e[2], -1)
424 e = dirstatetuple(e[0], e[1], e[2], -1)
401 dmap[f] = e
425 dmap[f] = e
402
426
403 if f in copymap:
427 if f in copymap:
404 f = b"%s\0%s" % (f, copymap[f])
428 f = b"%s\0%s" % (f, copymap[f])
405 e = _pack(b">cllll", e[0], e[1], e[2], e[3], len(f))
429 e = _pack(b">cllll", e[0], e[1], e[2], e[3], len(f))
406 write(e)
430 write(e)
407 write(f)
431 write(f)
408 return cs.getvalue()
432 return cs.getvalue()
@@ -1,3445 +1,3454 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import binascii
16 import binascii
17 import collections
17 import collections
18 import contextlib
18 import contextlib
19 import errno
19 import errno
20 import io
20 import io
21 import os
21 import os
22 import struct
22 import struct
23 import zlib
23 import zlib
24
24
25 # import stuff from node for others to import from revlog
25 # import stuff from node for others to import from revlog
26 from .node import (
26 from .node import (
27 bin,
27 bin,
28 hex,
28 hex,
29 nullrev,
29 nullrev,
30 sha1nodeconstants,
30 sha1nodeconstants,
31 short,
31 short,
32 wdirrev,
32 wdirrev,
33 )
33 )
34 from .i18n import _
34 from .i18n import _
35 from .pycompat import getattr
35 from .pycompat import getattr
36 from .revlogutils.constants import (
36 from .revlogutils.constants import (
37 ALL_KINDS,
37 ALL_KINDS,
38 CHANGELOGV2,
38 CHANGELOGV2,
39 COMP_MODE_DEFAULT,
39 COMP_MODE_DEFAULT,
40 COMP_MODE_INLINE,
40 COMP_MODE_INLINE,
41 COMP_MODE_PLAIN,
41 COMP_MODE_PLAIN,
42 FEATURES_BY_VERSION,
42 FEATURES_BY_VERSION,
43 FLAG_GENERALDELTA,
43 FLAG_GENERALDELTA,
44 FLAG_INLINE_DATA,
44 FLAG_INLINE_DATA,
45 INDEX_HEADER,
45 INDEX_HEADER,
46 KIND_CHANGELOG,
46 KIND_CHANGELOG,
47 REVLOGV0,
47 REVLOGV0,
48 REVLOGV1,
48 REVLOGV1,
49 REVLOGV1_FLAGS,
49 REVLOGV1_FLAGS,
50 REVLOGV2,
50 REVLOGV2,
51 REVLOGV2_FLAGS,
51 REVLOGV2_FLAGS,
52 REVLOG_DEFAULT_FLAGS,
52 REVLOG_DEFAULT_FLAGS,
53 REVLOG_DEFAULT_FORMAT,
53 REVLOG_DEFAULT_FORMAT,
54 REVLOG_DEFAULT_VERSION,
54 REVLOG_DEFAULT_VERSION,
55 SUPPORTED_FLAGS,
55 SUPPORTED_FLAGS,
56 )
56 )
57 from .revlogutils.flagutil import (
57 from .revlogutils.flagutil import (
58 REVIDX_DEFAULT_FLAGS,
58 REVIDX_DEFAULT_FLAGS,
59 REVIDX_ELLIPSIS,
59 REVIDX_ELLIPSIS,
60 REVIDX_EXTSTORED,
60 REVIDX_EXTSTORED,
61 REVIDX_FLAGS_ORDER,
61 REVIDX_FLAGS_ORDER,
62 REVIDX_HASCOPIESINFO,
62 REVIDX_HASCOPIESINFO,
63 REVIDX_ISCENSORED,
63 REVIDX_ISCENSORED,
64 REVIDX_RAWTEXT_CHANGING_FLAGS,
64 REVIDX_RAWTEXT_CHANGING_FLAGS,
65 )
65 )
66 from .thirdparty import attr
66 from .thirdparty import attr
67 from . import (
67 from . import (
68 ancestor,
68 ancestor,
69 dagop,
69 dagop,
70 error,
70 error,
71 mdiff,
71 mdiff,
72 policy,
72 policy,
73 pycompat,
73 pycompat,
74 templatefilters,
74 templatefilters,
75 util,
75 util,
76 )
76 )
77 from .interfaces import (
77 from .interfaces import (
78 repository,
78 repository,
79 util as interfaceutil,
79 util as interfaceutil,
80 )
80 )
81 from .revlogutils import (
81 from .revlogutils import (
82 deltas as deltautil,
82 deltas as deltautil,
83 docket as docketutil,
83 docket as docketutil,
84 flagutil,
84 flagutil,
85 nodemap as nodemaputil,
85 nodemap as nodemaputil,
86 revlogv0,
86 revlogv0,
87 sidedata as sidedatautil,
87 sidedata as sidedatautil,
88 )
88 )
89 from .utils import (
89 from .utils import (
90 storageutil,
90 storageutil,
91 stringutil,
91 stringutil,
92 )
92 )
93
93
94 # blanked usage of all the name to prevent pyflakes constraints
94 # blanked usage of all the name to prevent pyflakes constraints
95 # We need these name available in the module for extensions.
95 # We need these name available in the module for extensions.
96
96
97 REVLOGV0
97 REVLOGV0
98 REVLOGV1
98 REVLOGV1
99 REVLOGV2
99 REVLOGV2
100 FLAG_INLINE_DATA
100 FLAG_INLINE_DATA
101 FLAG_GENERALDELTA
101 FLAG_GENERALDELTA
102 REVLOG_DEFAULT_FLAGS
102 REVLOG_DEFAULT_FLAGS
103 REVLOG_DEFAULT_FORMAT
103 REVLOG_DEFAULT_FORMAT
104 REVLOG_DEFAULT_VERSION
104 REVLOG_DEFAULT_VERSION
105 REVLOGV1_FLAGS
105 REVLOGV1_FLAGS
106 REVLOGV2_FLAGS
106 REVLOGV2_FLAGS
107 REVIDX_ISCENSORED
107 REVIDX_ISCENSORED
108 REVIDX_ELLIPSIS
108 REVIDX_ELLIPSIS
109 REVIDX_HASCOPIESINFO
109 REVIDX_HASCOPIESINFO
110 REVIDX_EXTSTORED
110 REVIDX_EXTSTORED
111 REVIDX_DEFAULT_FLAGS
111 REVIDX_DEFAULT_FLAGS
112 REVIDX_FLAGS_ORDER
112 REVIDX_FLAGS_ORDER
113 REVIDX_RAWTEXT_CHANGING_FLAGS
113 REVIDX_RAWTEXT_CHANGING_FLAGS
114
114
115 parsers = policy.importmod('parsers')
115 parsers = policy.importmod('parsers')
116 rustancestor = policy.importrust('ancestor')
116 rustancestor = policy.importrust('ancestor')
117 rustdagop = policy.importrust('dagop')
117 rustdagop = policy.importrust('dagop')
118 rustrevlog = policy.importrust('revlog')
118 rustrevlog = policy.importrust('revlog')
119
119
120 # Aliased for performance.
120 # Aliased for performance.
121 _zlibdecompress = zlib.decompress
121 _zlibdecompress = zlib.decompress
122
122
123 # max size of revlog with inline data
123 # max size of revlog with inline data
124 _maxinline = 131072
124 _maxinline = 131072
125 _chunksize = 1048576
125 _chunksize = 1048576
126
126
127 # Flag processors for REVIDX_ELLIPSIS.
127 # Flag processors for REVIDX_ELLIPSIS.
128 def ellipsisreadprocessor(rl, text):
128 def ellipsisreadprocessor(rl, text):
129 return text, False
129 return text, False
130
130
131
131
132 def ellipsiswriteprocessor(rl, text):
132 def ellipsiswriteprocessor(rl, text):
133 return text, False
133 return text, False
134
134
135
135
136 def ellipsisrawprocessor(rl, text):
136 def ellipsisrawprocessor(rl, text):
137 return False
137 return False
138
138
139
139
140 ellipsisprocessor = (
140 ellipsisprocessor = (
141 ellipsisreadprocessor,
141 ellipsisreadprocessor,
142 ellipsiswriteprocessor,
142 ellipsiswriteprocessor,
143 ellipsisrawprocessor,
143 ellipsisrawprocessor,
144 )
144 )
145
145
146
146
147 def offset_type(offset, type):
147 def offset_type(offset, type):
148 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
148 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
149 raise ValueError(b'unknown revlog index flags')
149 raise ValueError(b'unknown revlog index flags')
150 return int(int(offset) << 16 | type)
150 return int(int(offset) << 16 | type)
151
151
152
152
153 def _verify_revision(rl, skipflags, state, node):
153 def _verify_revision(rl, skipflags, state, node):
154 """Verify the integrity of the given revlog ``node`` while providing a hook
154 """Verify the integrity of the given revlog ``node`` while providing a hook
155 point for extensions to influence the operation."""
155 point for extensions to influence the operation."""
156 if skipflags:
156 if skipflags:
157 state[b'skipread'].add(node)
157 state[b'skipread'].add(node)
158 else:
158 else:
159 # Side-effect: read content and verify hash.
159 # Side-effect: read content and verify hash.
160 rl.revision(node)
160 rl.revision(node)
161
161
162
162
163 # True if a fast implementation for persistent-nodemap is available
163 # True if a fast implementation for persistent-nodemap is available
164 #
164 #
165 # We also consider we have a "fast" implementation in "pure" python because
165 # We also consider we have a "fast" implementation in "pure" python because
166 # people using pure don't really have performance consideration (and a
166 # people using pure don't really have performance consideration (and a
167 # wheelbarrow of other slowness source)
167 # wheelbarrow of other slowness source)
168 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
168 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
169 parsers, 'BaseIndexObject'
169 parsers, 'BaseIndexObject'
170 )
170 )
171
171
172
172
173 @attr.s(slots=True, frozen=True)
173 @attr.s(slots=True, frozen=True)
174 class _revisioninfo(object):
174 class _revisioninfo(object):
175 """Information about a revision that allows building its fulltext
175 """Information about a revision that allows building its fulltext
176 node: expected hash of the revision
176 node: expected hash of the revision
177 p1, p2: parent revs of the revision
177 p1, p2: parent revs of the revision
178 btext: built text cache consisting of a one-element list
178 btext: built text cache consisting of a one-element list
179 cachedelta: (baserev, uncompressed_delta) or None
179 cachedelta: (baserev, uncompressed_delta) or None
180 flags: flags associated to the revision storage
180 flags: flags associated to the revision storage
181
181
182 One of btext[0] or cachedelta must be set.
182 One of btext[0] or cachedelta must be set.
183 """
183 """
184
184
185 node = attr.ib()
185 node = attr.ib()
186 p1 = attr.ib()
186 p1 = attr.ib()
187 p2 = attr.ib()
187 p2 = attr.ib()
188 btext = attr.ib()
188 btext = attr.ib()
189 textlen = attr.ib()
189 textlen = attr.ib()
190 cachedelta = attr.ib()
190 cachedelta = attr.ib()
191 flags = attr.ib()
191 flags = attr.ib()
192
192
193
193
194 @interfaceutil.implementer(repository.irevisiondelta)
194 @interfaceutil.implementer(repository.irevisiondelta)
195 @attr.s(slots=True)
195 @attr.s(slots=True)
196 class revlogrevisiondelta(object):
196 class revlogrevisiondelta(object):
197 node = attr.ib()
197 node = attr.ib()
198 p1node = attr.ib()
198 p1node = attr.ib()
199 p2node = attr.ib()
199 p2node = attr.ib()
200 basenode = attr.ib()
200 basenode = attr.ib()
201 flags = attr.ib()
201 flags = attr.ib()
202 baserevisionsize = attr.ib()
202 baserevisionsize = attr.ib()
203 revision = attr.ib()
203 revision = attr.ib()
204 delta = attr.ib()
204 delta = attr.ib()
205 sidedata = attr.ib()
205 sidedata = attr.ib()
206 protocol_flags = attr.ib()
206 protocol_flags = attr.ib()
207 linknode = attr.ib(default=None)
207 linknode = attr.ib(default=None)
208
208
209
209
210 @interfaceutil.implementer(repository.iverifyproblem)
210 @interfaceutil.implementer(repository.iverifyproblem)
211 @attr.s(frozen=True)
211 @attr.s(frozen=True)
212 class revlogproblem(object):
212 class revlogproblem(object):
213 warning = attr.ib(default=None)
213 warning = attr.ib(default=None)
214 error = attr.ib(default=None)
214 error = attr.ib(default=None)
215 node = attr.ib(default=None)
215 node = attr.ib(default=None)
216
216
217
217
218 def parse_index_v1(data, inline):
218 def parse_index_v1(data, inline):
219 # call the C implementation to parse the index data
219 # call the C implementation to parse the index data
220 index, cache = parsers.parse_index2(data, inline)
220 index, cache = parsers.parse_index2(data, inline)
221 return index, cache
221 return index, cache
222
222
223
223
224 def parse_index_v2(data, inline):
224 def parse_index_v2(data, inline):
225 # call the C implementation to parse the index data
225 # call the C implementation to parse the index data
226 index, cache = parsers.parse_index2(data, inline, revlogv2=True)
226 index, cache = parsers.parse_index2(data, inline, revlogv2=True)
227 return index, cache
227 return index, cache
228
228
229
229
230 def parse_index_cl_v2(data, inline):
231 # call the C implementation to parse the index data
232 assert not inline
233 from .pure.parsers import parse_index_cl_v2
234
235 index, cache = parse_index_cl_v2(data)
236 return index, cache
237
238
230 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
239 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
231
240
232 def parse_index_v1_nodemap(data, inline):
241 def parse_index_v1_nodemap(data, inline):
233 index, cache = parsers.parse_index_devel_nodemap(data, inline)
242 index, cache = parsers.parse_index_devel_nodemap(data, inline)
234 return index, cache
243 return index, cache
235
244
236
245
237 else:
246 else:
238 parse_index_v1_nodemap = None
247 parse_index_v1_nodemap = None
239
248
240
249
241 def parse_index_v1_mixed(data, inline):
250 def parse_index_v1_mixed(data, inline):
242 index, cache = parse_index_v1(data, inline)
251 index, cache = parse_index_v1(data, inline)
243 return rustrevlog.MixedIndex(index), cache
252 return rustrevlog.MixedIndex(index), cache
244
253
245
254
246 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
255 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
247 # signed integer)
256 # signed integer)
248 _maxentrysize = 0x7FFFFFFF
257 _maxentrysize = 0x7FFFFFFF
249
258
250
259
251 class revlog(object):
260 class revlog(object):
252 """
261 """
253 the underlying revision storage object
262 the underlying revision storage object
254
263
255 A revlog consists of two parts, an index and the revision data.
264 A revlog consists of two parts, an index and the revision data.
256
265
257 The index is a file with a fixed record size containing
266 The index is a file with a fixed record size containing
258 information on each revision, including its nodeid (hash), the
267 information on each revision, including its nodeid (hash), the
259 nodeids of its parents, the position and offset of its data within
268 nodeids of its parents, the position and offset of its data within
260 the data file, and the revision it's based on. Finally, each entry
269 the data file, and the revision it's based on. Finally, each entry
261 contains a linkrev entry that can serve as a pointer to external
270 contains a linkrev entry that can serve as a pointer to external
262 data.
271 data.
263
272
264 The revision data itself is a linear collection of data chunks.
273 The revision data itself is a linear collection of data chunks.
265 Each chunk represents a revision and is usually represented as a
274 Each chunk represents a revision and is usually represented as a
266 delta against the previous chunk. To bound lookup time, runs of
275 delta against the previous chunk. To bound lookup time, runs of
267 deltas are limited to about 2 times the length of the original
276 deltas are limited to about 2 times the length of the original
268 version data. This makes retrieval of a version proportional to
277 version data. This makes retrieval of a version proportional to
269 its size, or O(1) relative to the number of revisions.
278 its size, or O(1) relative to the number of revisions.
270
279
271 Both pieces of the revlog are written to in an append-only
280 Both pieces of the revlog are written to in an append-only
272 fashion, which means we never need to rewrite a file to insert or
281 fashion, which means we never need to rewrite a file to insert or
273 remove data, and can use some simple techniques to avoid the need
282 remove data, and can use some simple techniques to avoid the need
274 for locking while reading.
283 for locking while reading.
275
284
276 If checkambig, indexfile is opened with checkambig=True at
285 If checkambig, indexfile is opened with checkambig=True at
277 writing, to avoid file stat ambiguity.
286 writing, to avoid file stat ambiguity.
278
287
279 If mmaplargeindex is True, and an mmapindexthreshold is set, the
288 If mmaplargeindex is True, and an mmapindexthreshold is set, the
280 index will be mmapped rather than read if it is larger than the
289 index will be mmapped rather than read if it is larger than the
281 configured threshold.
290 configured threshold.
282
291
283 If censorable is True, the revlog can have censored revisions.
292 If censorable is True, the revlog can have censored revisions.
284
293
285 If `upperboundcomp` is not None, this is the expected maximal gain from
294 If `upperboundcomp` is not None, this is the expected maximal gain from
286 compression for the data content.
295 compression for the data content.
287
296
288 `concurrencychecker` is an optional function that receives 3 arguments: a
297 `concurrencychecker` is an optional function that receives 3 arguments: a
289 file handle, a filename, and an expected position. It should check whether
298 file handle, a filename, and an expected position. It should check whether
290 the current position in the file handle is valid, and log/warn/fail (by
299 the current position in the file handle is valid, and log/warn/fail (by
291 raising).
300 raising).
292
301
293
302
294 Internal details
303 Internal details
295 ----------------
304 ----------------
296
305
297 A large part of the revlog logic deals with revisions' "index entries", tuple
306 A large part of the revlog logic deals with revisions' "index entries", tuple
298 objects that contains the same "items" whatever the revlog version.
307 objects that contains the same "items" whatever the revlog version.
299 Different versions will have different ways of storing these items (sometimes
308 Different versions will have different ways of storing these items (sometimes
300 not having them at all), but the tuple will always be the same. New fields
309 not having them at all), but the tuple will always be the same. New fields
301 are usually added at the end to avoid breaking existing code that relies
310 are usually added at the end to avoid breaking existing code that relies
302 on the existing order. The field are defined as follows:
311 on the existing order. The field are defined as follows:
303
312
304 [0] offset:
313 [0] offset:
305 The byte index of the start of revision data chunk.
314 The byte index of the start of revision data chunk.
306 That value is shifted up by 16 bits. use "offset = field >> 16" to
315 That value is shifted up by 16 bits. use "offset = field >> 16" to
307 retrieve it.
316 retrieve it.
308
317
309 flags:
318 flags:
310 A flag field that carries special information or changes the behavior
319 A flag field that carries special information or changes the behavior
311 of the revision. (see `REVIDX_*` constants for details)
320 of the revision. (see `REVIDX_*` constants for details)
312 The flag field only occupies the first 16 bits of this field,
321 The flag field only occupies the first 16 bits of this field,
313 use "flags = field & 0xFFFF" to retrieve the value.
322 use "flags = field & 0xFFFF" to retrieve the value.
314
323
315 [1] compressed length:
324 [1] compressed length:
316 The size, in bytes, of the chunk on disk
325 The size, in bytes, of the chunk on disk
317
326
318 [2] uncompressed length:
327 [2] uncompressed length:
319 The size, in bytes, of the full revision once reconstructed.
328 The size, in bytes, of the full revision once reconstructed.
320
329
321 [3] base rev:
330 [3] base rev:
322 Either the base of the revision delta chain (without general
331 Either the base of the revision delta chain (without general
323 delta), or the base of the delta (stored in the data chunk)
332 delta), or the base of the delta (stored in the data chunk)
324 with general delta.
333 with general delta.
325
334
326 [4] link rev:
335 [4] link rev:
327 Changelog revision number of the changeset introducing this
336 Changelog revision number of the changeset introducing this
328 revision.
337 revision.
329
338
330 [5] parent 1 rev:
339 [5] parent 1 rev:
331 Revision number of the first parent
340 Revision number of the first parent
332
341
333 [6] parent 2 rev:
342 [6] parent 2 rev:
334 Revision number of the second parent
343 Revision number of the second parent
335
344
336 [7] node id:
345 [7] node id:
337 The node id of the current revision
346 The node id of the current revision
338
347
339 [8] sidedata offset:
348 [8] sidedata offset:
340 The byte index of the start of the revision's side-data chunk.
349 The byte index of the start of the revision's side-data chunk.
341
350
342 [9] sidedata chunk length:
351 [9] sidedata chunk length:
343 The size, in bytes, of the revision's side-data chunk.
352 The size, in bytes, of the revision's side-data chunk.
344
353
345 [10] data compression mode:
354 [10] data compression mode:
346 two bits that detail the way the data chunk is compressed on disk.
355 two bits that detail the way the data chunk is compressed on disk.
347 (see "COMP_MODE_*" constants for details). For revlog version 0 and
356 (see "COMP_MODE_*" constants for details). For revlog version 0 and
348 1 this will always be COMP_MODE_INLINE.
357 1 this will always be COMP_MODE_INLINE.
349
358
350 [11] side-data compression mode:
359 [11] side-data compression mode:
351 two bits that detail the way the sidedata chunk is compressed on disk.
360 two bits that detail the way the sidedata chunk is compressed on disk.
352 (see "COMP_MODE_*" constants for details)
361 (see "COMP_MODE_*" constants for details)
353 """
362 """
354
363
355 _flagserrorclass = error.RevlogError
364 _flagserrorclass = error.RevlogError
356
365
357 def __init__(
366 def __init__(
358 self,
367 self,
359 opener,
368 opener,
360 target,
369 target,
361 radix,
370 radix,
362 postfix=None, # only exist for `tmpcensored` now
371 postfix=None, # only exist for `tmpcensored` now
363 checkambig=False,
372 checkambig=False,
364 mmaplargeindex=False,
373 mmaplargeindex=False,
365 censorable=False,
374 censorable=False,
366 upperboundcomp=None,
375 upperboundcomp=None,
367 persistentnodemap=False,
376 persistentnodemap=False,
368 concurrencychecker=None,
377 concurrencychecker=None,
369 trypending=False,
378 trypending=False,
370 ):
379 ):
371 """
380 """
372 create a revlog object
381 create a revlog object
373
382
374 opener is a function that abstracts the file opening operation
383 opener is a function that abstracts the file opening operation
375 and can be used to implement COW semantics or the like.
384 and can be used to implement COW semantics or the like.
376
385
377 `target`: a (KIND, ID) tuple that identify the content stored in
386 `target`: a (KIND, ID) tuple that identify the content stored in
378 this revlog. It help the rest of the code to understand what the revlog
387 this revlog. It help the rest of the code to understand what the revlog
379 is about without having to resort to heuristic and index filename
388 is about without having to resort to heuristic and index filename
380 analysis. Note: that this must be reliably be set by normal code, but
389 analysis. Note: that this must be reliably be set by normal code, but
381 that test, debug, or performance measurement code might not set this to
390 that test, debug, or performance measurement code might not set this to
382 accurate value.
391 accurate value.
383 """
392 """
384 self.upperboundcomp = upperboundcomp
393 self.upperboundcomp = upperboundcomp
385
394
386 self.radix = radix
395 self.radix = radix
387
396
388 self._docket_file = None
397 self._docket_file = None
389 self._indexfile = None
398 self._indexfile = None
390 self._datafile = None
399 self._datafile = None
391 self._nodemap_file = None
400 self._nodemap_file = None
392 self.postfix = postfix
401 self.postfix = postfix
393 self._trypending = trypending
402 self._trypending = trypending
394 self.opener = opener
403 self.opener = opener
395 if persistentnodemap:
404 if persistentnodemap:
396 self._nodemap_file = nodemaputil.get_nodemap_file(self)
405 self._nodemap_file = nodemaputil.get_nodemap_file(self)
397
406
398 assert target[0] in ALL_KINDS
407 assert target[0] in ALL_KINDS
399 assert len(target) == 2
408 assert len(target) == 2
400 self.target = target
409 self.target = target
401 # When True, indexfile is opened with checkambig=True at writing, to
410 # When True, indexfile is opened with checkambig=True at writing, to
402 # avoid file stat ambiguity.
411 # avoid file stat ambiguity.
403 self._checkambig = checkambig
412 self._checkambig = checkambig
404 self._mmaplargeindex = mmaplargeindex
413 self._mmaplargeindex = mmaplargeindex
405 self._censorable = censorable
414 self._censorable = censorable
406 # 3-tuple of (node, rev, text) for a raw revision.
415 # 3-tuple of (node, rev, text) for a raw revision.
407 self._revisioncache = None
416 self._revisioncache = None
408 # Maps rev to chain base rev.
417 # Maps rev to chain base rev.
409 self._chainbasecache = util.lrucachedict(100)
418 self._chainbasecache = util.lrucachedict(100)
410 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
419 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
411 self._chunkcache = (0, b'')
420 self._chunkcache = (0, b'')
412 # How much data to read and cache into the raw revlog data cache.
421 # How much data to read and cache into the raw revlog data cache.
413 self._chunkcachesize = 65536
422 self._chunkcachesize = 65536
414 self._maxchainlen = None
423 self._maxchainlen = None
415 self._deltabothparents = True
424 self._deltabothparents = True
416 self.index = None
425 self.index = None
417 self._docket = None
426 self._docket = None
418 self._nodemap_docket = None
427 self._nodemap_docket = None
419 # Mapping of partial identifiers to full nodes.
428 # Mapping of partial identifiers to full nodes.
420 self._pcache = {}
429 self._pcache = {}
421 # Mapping of revision integer to full node.
430 # Mapping of revision integer to full node.
422 self._compengine = b'zlib'
431 self._compengine = b'zlib'
423 self._compengineopts = {}
432 self._compengineopts = {}
424 self._maxdeltachainspan = -1
433 self._maxdeltachainspan = -1
425 self._withsparseread = False
434 self._withsparseread = False
426 self._sparserevlog = False
435 self._sparserevlog = False
427 self.hassidedata = False
436 self.hassidedata = False
428 self._srdensitythreshold = 0.50
437 self._srdensitythreshold = 0.50
429 self._srmingapsize = 262144
438 self._srmingapsize = 262144
430
439
431 # Make copy of flag processors so each revlog instance can support
440 # Make copy of flag processors so each revlog instance can support
432 # custom flags.
441 # custom flags.
433 self._flagprocessors = dict(flagutil.flagprocessors)
442 self._flagprocessors = dict(flagutil.flagprocessors)
434
443
435 # 2-tuple of file handles being used for active writing.
444 # 2-tuple of file handles being used for active writing.
436 self._writinghandles = None
445 self._writinghandles = None
437 # prevent nesting of addgroup
446 # prevent nesting of addgroup
438 self._adding_group = None
447 self._adding_group = None
439
448
440 self._loadindex()
449 self._loadindex()
441
450
442 self._concurrencychecker = concurrencychecker
451 self._concurrencychecker = concurrencychecker
443
452
444 def _init_opts(self):
453 def _init_opts(self):
445 """process options (from above/config) to setup associated default revlog mode
454 """process options (from above/config) to setup associated default revlog mode
446
455
447 These values might be affected when actually reading on disk information.
456 These values might be affected when actually reading on disk information.
448
457
449 The relevant values are returned for use in _loadindex().
458 The relevant values are returned for use in _loadindex().
450
459
451 * newversionflags:
460 * newversionflags:
452 version header to use if we need to create a new revlog
461 version header to use if we need to create a new revlog
453
462
454 * mmapindexthreshold:
463 * mmapindexthreshold:
455 minimal index size for start to use mmap
464 minimal index size for start to use mmap
456
465
457 * force_nodemap:
466 * force_nodemap:
458 force the usage of a "development" version of the nodemap code
467 force the usage of a "development" version of the nodemap code
459 """
468 """
460 mmapindexthreshold = None
469 mmapindexthreshold = None
461 opts = self.opener.options
470 opts = self.opener.options
462
471
463 if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
472 if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
464 new_header = CHANGELOGV2
473 new_header = CHANGELOGV2
465 elif b'revlogv2' in opts:
474 elif b'revlogv2' in opts:
466 new_header = REVLOGV2
475 new_header = REVLOGV2
467 elif b'revlogv1' in opts:
476 elif b'revlogv1' in opts:
468 new_header = REVLOGV1 | FLAG_INLINE_DATA
477 new_header = REVLOGV1 | FLAG_INLINE_DATA
469 if b'generaldelta' in opts:
478 if b'generaldelta' in opts:
470 new_header |= FLAG_GENERALDELTA
479 new_header |= FLAG_GENERALDELTA
471 elif b'revlogv0' in self.opener.options:
480 elif b'revlogv0' in self.opener.options:
472 new_header = REVLOGV0
481 new_header = REVLOGV0
473 else:
482 else:
474 new_header = REVLOG_DEFAULT_VERSION
483 new_header = REVLOG_DEFAULT_VERSION
475
484
476 if b'chunkcachesize' in opts:
485 if b'chunkcachesize' in opts:
477 self._chunkcachesize = opts[b'chunkcachesize']
486 self._chunkcachesize = opts[b'chunkcachesize']
478 if b'maxchainlen' in opts:
487 if b'maxchainlen' in opts:
479 self._maxchainlen = opts[b'maxchainlen']
488 self._maxchainlen = opts[b'maxchainlen']
480 if b'deltabothparents' in opts:
489 if b'deltabothparents' in opts:
481 self._deltabothparents = opts[b'deltabothparents']
490 self._deltabothparents = opts[b'deltabothparents']
482 self._lazydelta = bool(opts.get(b'lazydelta', True))
491 self._lazydelta = bool(opts.get(b'lazydelta', True))
483 self._lazydeltabase = False
492 self._lazydeltabase = False
484 if self._lazydelta:
493 if self._lazydelta:
485 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
494 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
486 if b'compengine' in opts:
495 if b'compengine' in opts:
487 self._compengine = opts[b'compengine']
496 self._compengine = opts[b'compengine']
488 if b'zlib.level' in opts:
497 if b'zlib.level' in opts:
489 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
498 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
490 if b'zstd.level' in opts:
499 if b'zstd.level' in opts:
491 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
500 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
492 if b'maxdeltachainspan' in opts:
501 if b'maxdeltachainspan' in opts:
493 self._maxdeltachainspan = opts[b'maxdeltachainspan']
502 self._maxdeltachainspan = opts[b'maxdeltachainspan']
494 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
503 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
495 mmapindexthreshold = opts[b'mmapindexthreshold']
504 mmapindexthreshold = opts[b'mmapindexthreshold']
496 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
505 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
497 withsparseread = bool(opts.get(b'with-sparse-read', False))
506 withsparseread = bool(opts.get(b'with-sparse-read', False))
498 # sparse-revlog forces sparse-read
507 # sparse-revlog forces sparse-read
499 self._withsparseread = self._sparserevlog or withsparseread
508 self._withsparseread = self._sparserevlog or withsparseread
500 if b'sparse-read-density-threshold' in opts:
509 if b'sparse-read-density-threshold' in opts:
501 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
510 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
502 if b'sparse-read-min-gap-size' in opts:
511 if b'sparse-read-min-gap-size' in opts:
503 self._srmingapsize = opts[b'sparse-read-min-gap-size']
512 self._srmingapsize = opts[b'sparse-read-min-gap-size']
504 if opts.get(b'enableellipsis'):
513 if opts.get(b'enableellipsis'):
505 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
514 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
506
515
507 # revlog v0 doesn't have flag processors
516 # revlog v0 doesn't have flag processors
508 for flag, processor in pycompat.iteritems(
517 for flag, processor in pycompat.iteritems(
509 opts.get(b'flagprocessors', {})
518 opts.get(b'flagprocessors', {})
510 ):
519 ):
511 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
520 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
512
521
513 if self._chunkcachesize <= 0:
522 if self._chunkcachesize <= 0:
514 raise error.RevlogError(
523 raise error.RevlogError(
515 _(b'revlog chunk cache size %r is not greater than 0')
524 _(b'revlog chunk cache size %r is not greater than 0')
516 % self._chunkcachesize
525 % self._chunkcachesize
517 )
526 )
518 elif self._chunkcachesize & (self._chunkcachesize - 1):
527 elif self._chunkcachesize & (self._chunkcachesize - 1):
519 raise error.RevlogError(
528 raise error.RevlogError(
520 _(b'revlog chunk cache size %r is not a power of 2')
529 _(b'revlog chunk cache size %r is not a power of 2')
521 % self._chunkcachesize
530 % self._chunkcachesize
522 )
531 )
523 force_nodemap = opts.get(b'devel-force-nodemap', False)
532 force_nodemap = opts.get(b'devel-force-nodemap', False)
524 return new_header, mmapindexthreshold, force_nodemap
533 return new_header, mmapindexthreshold, force_nodemap
525
534
526 def _get_data(self, filepath, mmap_threshold, size=None):
535 def _get_data(self, filepath, mmap_threshold, size=None):
527 """return a file content with or without mmap
536 """return a file content with or without mmap
528
537
529 If the file is missing return the empty string"""
538 If the file is missing return the empty string"""
530 try:
539 try:
531 with self.opener(filepath) as fp:
540 with self.opener(filepath) as fp:
532 if mmap_threshold is not None:
541 if mmap_threshold is not None:
533 file_size = self.opener.fstat(fp).st_size
542 file_size = self.opener.fstat(fp).st_size
534 if file_size >= mmap_threshold:
543 if file_size >= mmap_threshold:
535 if size is not None:
544 if size is not None:
536 # avoid potentiel mmap crash
545 # avoid potentiel mmap crash
537 size = min(file_size, size)
546 size = min(file_size, size)
538 # TODO: should .close() to release resources without
547 # TODO: should .close() to release resources without
539 # relying on Python GC
548 # relying on Python GC
540 if size is None:
549 if size is None:
541 return util.buffer(util.mmapread(fp))
550 return util.buffer(util.mmapread(fp))
542 else:
551 else:
543 return util.buffer(util.mmapread(fp, size))
552 return util.buffer(util.mmapread(fp, size))
544 if size is None:
553 if size is None:
545 return fp.read()
554 return fp.read()
546 else:
555 else:
547 return fp.read(size)
556 return fp.read(size)
548 except IOError as inst:
557 except IOError as inst:
549 if inst.errno != errno.ENOENT:
558 if inst.errno != errno.ENOENT:
550 raise
559 raise
551 return b''
560 return b''
552
561
553 def _loadindex(self):
562 def _loadindex(self):
554
563
555 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
564 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
556
565
557 if self.postfix is not None:
566 if self.postfix is not None:
558 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
567 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
559 elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
568 elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
560 entry_point = b'%s.i.a' % self.radix
569 entry_point = b'%s.i.a' % self.radix
561 else:
570 else:
562 entry_point = b'%s.i' % self.radix
571 entry_point = b'%s.i' % self.radix
563
572
564 entry_data = b''
573 entry_data = b''
565 self._initempty = True
574 self._initempty = True
566 entry_data = self._get_data(entry_point, mmapindexthreshold)
575 entry_data = self._get_data(entry_point, mmapindexthreshold)
567 if len(entry_data) > 0:
576 if len(entry_data) > 0:
568 header = INDEX_HEADER.unpack(entry_data[:4])[0]
577 header = INDEX_HEADER.unpack(entry_data[:4])[0]
569 self._initempty = False
578 self._initempty = False
570 else:
579 else:
571 header = new_header
580 header = new_header
572
581
573 self._format_flags = header & ~0xFFFF
582 self._format_flags = header & ~0xFFFF
574 self._format_version = header & 0xFFFF
583 self._format_version = header & 0xFFFF
575
584
576 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
585 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
577 if supported_flags is None:
586 if supported_flags is None:
578 msg = _(b'unknown version (%d) in revlog %s')
587 msg = _(b'unknown version (%d) in revlog %s')
579 msg %= (self._format_version, self.display_id)
588 msg %= (self._format_version, self.display_id)
580 raise error.RevlogError(msg)
589 raise error.RevlogError(msg)
581 elif self._format_flags & ~supported_flags:
590 elif self._format_flags & ~supported_flags:
582 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
591 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
583 display_flag = self._format_flags >> 16
592 display_flag = self._format_flags >> 16
584 msg %= (display_flag, self._format_version, self.display_id)
593 msg %= (display_flag, self._format_version, self.display_id)
585 raise error.RevlogError(msg)
594 raise error.RevlogError(msg)
586
595
587 features = FEATURES_BY_VERSION[self._format_version]
596 features = FEATURES_BY_VERSION[self._format_version]
588 self._inline = features[b'inline'](self._format_flags)
597 self._inline = features[b'inline'](self._format_flags)
589 self._generaldelta = features[b'generaldelta'](self._format_flags)
598 self._generaldelta = features[b'generaldelta'](self._format_flags)
590 self.hassidedata = features[b'sidedata']
599 self.hassidedata = features[b'sidedata']
591
600
592 if not features[b'docket']:
601 if not features[b'docket']:
593 self._indexfile = entry_point
602 self._indexfile = entry_point
594 index_data = entry_data
603 index_data = entry_data
595 else:
604 else:
596 self._docket_file = entry_point
605 self._docket_file = entry_point
597 if self._initempty:
606 if self._initempty:
598 self._docket = docketutil.default_docket(self, header)
607 self._docket = docketutil.default_docket(self, header)
599 else:
608 else:
600 self._docket = docketutil.parse_docket(
609 self._docket = docketutil.parse_docket(
601 self, entry_data, use_pending=self._trypending
610 self, entry_data, use_pending=self._trypending
602 )
611 )
603 self._indexfile = self._docket.index_filepath()
612 self._indexfile = self._docket.index_filepath()
604 index_data = b''
613 index_data = b''
605 index_size = self._docket.index_end
614 index_size = self._docket.index_end
606 if index_size > 0:
615 if index_size > 0:
607 index_data = self._get_data(
616 index_data = self._get_data(
608 self._indexfile, mmapindexthreshold, size=index_size
617 self._indexfile, mmapindexthreshold, size=index_size
609 )
618 )
610 if len(index_data) < index_size:
619 if len(index_data) < index_size:
611 msg = _(b'too few index data for %s: got %d, expected %d')
620 msg = _(b'too few index data for %s: got %d, expected %d')
612 msg %= (self.display_id, len(index_data), index_size)
621 msg %= (self.display_id, len(index_data), index_size)
613 raise error.RevlogError(msg)
622 raise error.RevlogError(msg)
614
623
615 self._inline = False
624 self._inline = False
616 # generaldelta implied by version 2 revlogs.
625 # generaldelta implied by version 2 revlogs.
617 self._generaldelta = True
626 self._generaldelta = True
618 # the logic for persistent nodemap will be dealt with within the
627 # the logic for persistent nodemap will be dealt with within the
619 # main docket, so disable it for now.
628 # main docket, so disable it for now.
620 self._nodemap_file = None
629 self._nodemap_file = None
621
630
622 if self.postfix is None:
631 if self.postfix is None:
623 self._datafile = b'%s.d' % self.radix
632 self._datafile = b'%s.d' % self.radix
624 else:
633 else:
625 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
634 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
626
635
627 self.nodeconstants = sha1nodeconstants
636 self.nodeconstants = sha1nodeconstants
628 self.nullid = self.nodeconstants.nullid
637 self.nullid = self.nodeconstants.nullid
629
638
630 # sparse-revlog can't be on without general-delta (issue6056)
639 # sparse-revlog can't be on without general-delta (issue6056)
631 if not self._generaldelta:
640 if not self._generaldelta:
632 self._sparserevlog = False
641 self._sparserevlog = False
633
642
634 self._storedeltachains = True
643 self._storedeltachains = True
635
644
636 devel_nodemap = (
645 devel_nodemap = (
637 self._nodemap_file
646 self._nodemap_file
638 and force_nodemap
647 and force_nodemap
639 and parse_index_v1_nodemap is not None
648 and parse_index_v1_nodemap is not None
640 )
649 )
641
650
642 use_rust_index = False
651 use_rust_index = False
643 if rustrevlog is not None:
652 if rustrevlog is not None:
644 if self._nodemap_file is not None:
653 if self._nodemap_file is not None:
645 use_rust_index = True
654 use_rust_index = True
646 else:
655 else:
647 use_rust_index = self.opener.options.get(b'rust.index')
656 use_rust_index = self.opener.options.get(b'rust.index')
648
657
649 self._parse_index = parse_index_v1
658 self._parse_index = parse_index_v1
650 if self._format_version == REVLOGV0:
659 if self._format_version == REVLOGV0:
651 self._parse_index = revlogv0.parse_index_v0
660 self._parse_index = revlogv0.parse_index_v0
652 elif self._format_version == REVLOGV2:
661 elif self._format_version == REVLOGV2:
653 self._parse_index = parse_index_v2
662 self._parse_index = parse_index_v2
654 elif self._format_version == CHANGELOGV2:
663 elif self._format_version == CHANGELOGV2:
655 self._parse_index = parse_index_v2
664 self._parse_index = parse_index_cl_v2
656 elif devel_nodemap:
665 elif devel_nodemap:
657 self._parse_index = parse_index_v1_nodemap
666 self._parse_index = parse_index_v1_nodemap
658 elif use_rust_index:
667 elif use_rust_index:
659 self._parse_index = parse_index_v1_mixed
668 self._parse_index = parse_index_v1_mixed
660 try:
669 try:
661 d = self._parse_index(index_data, self._inline)
670 d = self._parse_index(index_data, self._inline)
662 index, _chunkcache = d
671 index, _chunkcache = d
663 use_nodemap = (
672 use_nodemap = (
664 not self._inline
673 not self._inline
665 and self._nodemap_file is not None
674 and self._nodemap_file is not None
666 and util.safehasattr(index, 'update_nodemap_data')
675 and util.safehasattr(index, 'update_nodemap_data')
667 )
676 )
668 if use_nodemap:
677 if use_nodemap:
669 nodemap_data = nodemaputil.persisted_data(self)
678 nodemap_data = nodemaputil.persisted_data(self)
670 if nodemap_data is not None:
679 if nodemap_data is not None:
671 docket = nodemap_data[0]
680 docket = nodemap_data[0]
672 if (
681 if (
673 len(d[0]) > docket.tip_rev
682 len(d[0]) > docket.tip_rev
674 and d[0][docket.tip_rev][7] == docket.tip_node
683 and d[0][docket.tip_rev][7] == docket.tip_node
675 ):
684 ):
676 # no changelog tampering
685 # no changelog tampering
677 self._nodemap_docket = docket
686 self._nodemap_docket = docket
678 index.update_nodemap_data(*nodemap_data)
687 index.update_nodemap_data(*nodemap_data)
679 except (ValueError, IndexError):
688 except (ValueError, IndexError):
680 raise error.RevlogError(
689 raise error.RevlogError(
681 _(b"index %s is corrupted") % self.display_id
690 _(b"index %s is corrupted") % self.display_id
682 )
691 )
683 self.index, self._chunkcache = d
692 self.index, self._chunkcache = d
684 if not self._chunkcache:
693 if not self._chunkcache:
685 self._chunkclear()
694 self._chunkclear()
686 # revnum -> (chain-length, sum-delta-length)
695 # revnum -> (chain-length, sum-delta-length)
687 self._chaininfocache = util.lrucachedict(500)
696 self._chaininfocache = util.lrucachedict(500)
688 # revlog header -> revlog compressor
697 # revlog header -> revlog compressor
689 self._decompressors = {}
698 self._decompressors = {}
690
699
691 @util.propertycache
700 @util.propertycache
692 def revlog_kind(self):
701 def revlog_kind(self):
693 return self.target[0]
702 return self.target[0]
694
703
695 @util.propertycache
704 @util.propertycache
696 def display_id(self):
705 def display_id(self):
697 """The public facing "ID" of the revlog that we use in message"""
706 """The public facing "ID" of the revlog that we use in message"""
698 # Maybe we should build a user facing representation of
707 # Maybe we should build a user facing representation of
699 # revlog.target instead of using `self.radix`
708 # revlog.target instead of using `self.radix`
700 return self.radix
709 return self.radix
701
710
702 def _get_decompressor(self, t):
711 def _get_decompressor(self, t):
703 try:
712 try:
704 compressor = self._decompressors[t]
713 compressor = self._decompressors[t]
705 except KeyError:
714 except KeyError:
706 try:
715 try:
707 engine = util.compengines.forrevlogheader(t)
716 engine = util.compengines.forrevlogheader(t)
708 compressor = engine.revlogcompressor(self._compengineopts)
717 compressor = engine.revlogcompressor(self._compengineopts)
709 self._decompressors[t] = compressor
718 self._decompressors[t] = compressor
710 except KeyError:
719 except KeyError:
711 raise error.RevlogError(
720 raise error.RevlogError(
712 _(b'unknown compression type %s') % binascii.hexlify(t)
721 _(b'unknown compression type %s') % binascii.hexlify(t)
713 )
722 )
714 return compressor
723 return compressor
715
724
716 @util.propertycache
725 @util.propertycache
717 def _compressor(self):
726 def _compressor(self):
718 engine = util.compengines[self._compengine]
727 engine = util.compengines[self._compengine]
719 return engine.revlogcompressor(self._compengineopts)
728 return engine.revlogcompressor(self._compengineopts)
720
729
721 @util.propertycache
730 @util.propertycache
722 def _decompressor(self):
731 def _decompressor(self):
723 """the default decompressor"""
732 """the default decompressor"""
724 if self._docket is None:
733 if self._docket is None:
725 return None
734 return None
726 t = self._docket.default_compression_header
735 t = self._docket.default_compression_header
727 c = self._get_decompressor(t)
736 c = self._get_decompressor(t)
728 return c.decompress
737 return c.decompress
729
738
730 def _indexfp(self):
739 def _indexfp(self):
731 """file object for the revlog's index file"""
740 """file object for the revlog's index file"""
732 return self.opener(self._indexfile, mode=b"r")
741 return self.opener(self._indexfile, mode=b"r")
733
742
734 def __index_write_fp(self):
743 def __index_write_fp(self):
735 # You should not use this directly and use `_writing` instead
744 # You should not use this directly and use `_writing` instead
736 try:
745 try:
737 f = self.opener(
746 f = self.opener(
738 self._indexfile, mode=b"r+", checkambig=self._checkambig
747 self._indexfile, mode=b"r+", checkambig=self._checkambig
739 )
748 )
740 if self._docket is None:
749 if self._docket is None:
741 f.seek(0, os.SEEK_END)
750 f.seek(0, os.SEEK_END)
742 else:
751 else:
743 f.seek(self._docket.index_end, os.SEEK_SET)
752 f.seek(self._docket.index_end, os.SEEK_SET)
744 return f
753 return f
745 except IOError as inst:
754 except IOError as inst:
746 if inst.errno != errno.ENOENT:
755 if inst.errno != errno.ENOENT:
747 raise
756 raise
748 return self.opener(
757 return self.opener(
749 self._indexfile, mode=b"w+", checkambig=self._checkambig
758 self._indexfile, mode=b"w+", checkambig=self._checkambig
750 )
759 )
751
760
752 def __index_new_fp(self):
761 def __index_new_fp(self):
753 # You should not use this unless you are upgrading from inline revlog
762 # You should not use this unless you are upgrading from inline revlog
754 return self.opener(
763 return self.opener(
755 self._indexfile,
764 self._indexfile,
756 mode=b"w",
765 mode=b"w",
757 checkambig=self._checkambig,
766 checkambig=self._checkambig,
758 atomictemp=True,
767 atomictemp=True,
759 )
768 )
760
769
761 def _datafp(self, mode=b'r'):
770 def _datafp(self, mode=b'r'):
762 """file object for the revlog's data file"""
771 """file object for the revlog's data file"""
763 return self.opener(self._datafile, mode=mode)
772 return self.opener(self._datafile, mode=mode)
764
773
765 @contextlib.contextmanager
774 @contextlib.contextmanager
766 def _datareadfp(self, existingfp=None):
775 def _datareadfp(self, existingfp=None):
767 """file object suitable to read data"""
776 """file object suitable to read data"""
768 # Use explicit file handle, if given.
777 # Use explicit file handle, if given.
769 if existingfp is not None:
778 if existingfp is not None:
770 yield existingfp
779 yield existingfp
771
780
772 # Use a file handle being actively used for writes, if available.
781 # Use a file handle being actively used for writes, if available.
773 # There is some danger to doing this because reads will seek the
782 # There is some danger to doing this because reads will seek the
774 # file. However, _writeentry() performs a SEEK_END before all writes,
783 # file. However, _writeentry() performs a SEEK_END before all writes,
775 # so we should be safe.
784 # so we should be safe.
776 elif self._writinghandles:
785 elif self._writinghandles:
777 if self._inline:
786 if self._inline:
778 yield self._writinghandles[0]
787 yield self._writinghandles[0]
779 else:
788 else:
780 yield self._writinghandles[1]
789 yield self._writinghandles[1]
781
790
782 # Otherwise open a new file handle.
791 # Otherwise open a new file handle.
783 else:
792 else:
784 if self._inline:
793 if self._inline:
785 func = self._indexfp
794 func = self._indexfp
786 else:
795 else:
787 func = self._datafp
796 func = self._datafp
788 with func() as fp:
797 with func() as fp:
789 yield fp
798 yield fp
790
799
791 def tiprev(self):
800 def tiprev(self):
792 return len(self.index) - 1
801 return len(self.index) - 1
793
802
794 def tip(self):
803 def tip(self):
795 return self.node(self.tiprev())
804 return self.node(self.tiprev())
796
805
797 def __contains__(self, rev):
806 def __contains__(self, rev):
798 return 0 <= rev < len(self)
807 return 0 <= rev < len(self)
799
808
800 def __len__(self):
809 def __len__(self):
801 return len(self.index)
810 return len(self.index)
802
811
803 def __iter__(self):
812 def __iter__(self):
804 return iter(pycompat.xrange(len(self)))
813 return iter(pycompat.xrange(len(self)))
805
814
806 def revs(self, start=0, stop=None):
815 def revs(self, start=0, stop=None):
807 """iterate over all rev in this revlog (from start to stop)"""
816 """iterate over all rev in this revlog (from start to stop)"""
808 return storageutil.iterrevs(len(self), start=start, stop=stop)
817 return storageutil.iterrevs(len(self), start=start, stop=stop)
809
818
810 @property
819 @property
811 def nodemap(self):
820 def nodemap(self):
812 msg = (
821 msg = (
813 b"revlog.nodemap is deprecated, "
822 b"revlog.nodemap is deprecated, "
814 b"use revlog.index.[has_node|rev|get_rev]"
823 b"use revlog.index.[has_node|rev|get_rev]"
815 )
824 )
816 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
825 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
817 return self.index.nodemap
826 return self.index.nodemap
818
827
819 @property
828 @property
820 def _nodecache(self):
829 def _nodecache(self):
821 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
830 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
822 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
831 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
823 return self.index.nodemap
832 return self.index.nodemap
824
833
825 def hasnode(self, node):
834 def hasnode(self, node):
826 try:
835 try:
827 self.rev(node)
836 self.rev(node)
828 return True
837 return True
829 except KeyError:
838 except KeyError:
830 return False
839 return False
831
840
832 def candelta(self, baserev, rev):
841 def candelta(self, baserev, rev):
833 """whether two revisions (baserev, rev) can be delta-ed or not"""
842 """whether two revisions (baserev, rev) can be delta-ed or not"""
834 # Disable delta if either rev requires a content-changing flag
843 # Disable delta if either rev requires a content-changing flag
835 # processor (ex. LFS). This is because such flag processor can alter
844 # processor (ex. LFS). This is because such flag processor can alter
836 # the rawtext content that the delta will be based on, and two clients
845 # the rawtext content that the delta will be based on, and two clients
837 # could have a same revlog node with different flags (i.e. different
846 # could have a same revlog node with different flags (i.e. different
838 # rawtext contents) and the delta could be incompatible.
847 # rawtext contents) and the delta could be incompatible.
839 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
848 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
840 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
849 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
841 ):
850 ):
842 return False
851 return False
843 return True
852 return True
844
853
845 def update_caches(self, transaction):
854 def update_caches(self, transaction):
846 if self._nodemap_file is not None:
855 if self._nodemap_file is not None:
847 if transaction is None:
856 if transaction is None:
848 nodemaputil.update_persistent_nodemap(self)
857 nodemaputil.update_persistent_nodemap(self)
849 else:
858 else:
850 nodemaputil.setup_persistent_nodemap(transaction, self)
859 nodemaputil.setup_persistent_nodemap(transaction, self)
851
860
852 def clearcaches(self):
861 def clearcaches(self):
853 self._revisioncache = None
862 self._revisioncache = None
854 self._chainbasecache.clear()
863 self._chainbasecache.clear()
855 self._chunkcache = (0, b'')
864 self._chunkcache = (0, b'')
856 self._pcache = {}
865 self._pcache = {}
857 self._nodemap_docket = None
866 self._nodemap_docket = None
858 self.index.clearcaches()
867 self.index.clearcaches()
859 # The python code is the one responsible for validating the docket, we
868 # The python code is the one responsible for validating the docket, we
860 # end up having to refresh it here.
869 # end up having to refresh it here.
861 use_nodemap = (
870 use_nodemap = (
862 not self._inline
871 not self._inline
863 and self._nodemap_file is not None
872 and self._nodemap_file is not None
864 and util.safehasattr(self.index, 'update_nodemap_data')
873 and util.safehasattr(self.index, 'update_nodemap_data')
865 )
874 )
866 if use_nodemap:
875 if use_nodemap:
867 nodemap_data = nodemaputil.persisted_data(self)
876 nodemap_data = nodemaputil.persisted_data(self)
868 if nodemap_data is not None:
877 if nodemap_data is not None:
869 self._nodemap_docket = nodemap_data[0]
878 self._nodemap_docket = nodemap_data[0]
870 self.index.update_nodemap_data(*nodemap_data)
879 self.index.update_nodemap_data(*nodemap_data)
871
880
872 def rev(self, node):
881 def rev(self, node):
873 try:
882 try:
874 return self.index.rev(node)
883 return self.index.rev(node)
875 except TypeError:
884 except TypeError:
876 raise
885 raise
877 except error.RevlogError:
886 except error.RevlogError:
878 # parsers.c radix tree lookup failed
887 # parsers.c radix tree lookup failed
879 if (
888 if (
880 node == self.nodeconstants.wdirid
889 node == self.nodeconstants.wdirid
881 or node in self.nodeconstants.wdirfilenodeids
890 or node in self.nodeconstants.wdirfilenodeids
882 ):
891 ):
883 raise error.WdirUnsupported
892 raise error.WdirUnsupported
884 raise error.LookupError(node, self.display_id, _(b'no node'))
893 raise error.LookupError(node, self.display_id, _(b'no node'))
885
894
886 # Accessors for index entries.
895 # Accessors for index entries.
887
896
888 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
897 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
889 # are flags.
898 # are flags.
890 def start(self, rev):
899 def start(self, rev):
891 return int(self.index[rev][0] >> 16)
900 return int(self.index[rev][0] >> 16)
892
901
893 def flags(self, rev):
902 def flags(self, rev):
894 return self.index[rev][0] & 0xFFFF
903 return self.index[rev][0] & 0xFFFF
895
904
896 def length(self, rev):
905 def length(self, rev):
897 return self.index[rev][1]
906 return self.index[rev][1]
898
907
899 def sidedata_length(self, rev):
908 def sidedata_length(self, rev):
900 if not self.hassidedata:
909 if not self.hassidedata:
901 return 0
910 return 0
902 return self.index[rev][9]
911 return self.index[rev][9]
903
912
904 def rawsize(self, rev):
913 def rawsize(self, rev):
905 """return the length of the uncompressed text for a given revision"""
914 """return the length of the uncompressed text for a given revision"""
906 l = self.index[rev][2]
915 l = self.index[rev][2]
907 if l >= 0:
916 if l >= 0:
908 return l
917 return l
909
918
910 t = self.rawdata(rev)
919 t = self.rawdata(rev)
911 return len(t)
920 return len(t)
912
921
913 def size(self, rev):
922 def size(self, rev):
914 """length of non-raw text (processed by a "read" flag processor)"""
923 """length of non-raw text (processed by a "read" flag processor)"""
915 # fast path: if no "read" flag processor could change the content,
924 # fast path: if no "read" flag processor could change the content,
916 # size is rawsize. note: ELLIPSIS is known to not change the content.
925 # size is rawsize. note: ELLIPSIS is known to not change the content.
917 flags = self.flags(rev)
926 flags = self.flags(rev)
918 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
927 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
919 return self.rawsize(rev)
928 return self.rawsize(rev)
920
929
921 return len(self.revision(rev, raw=False))
930 return len(self.revision(rev, raw=False))
922
931
923 def chainbase(self, rev):
932 def chainbase(self, rev):
924 base = self._chainbasecache.get(rev)
933 base = self._chainbasecache.get(rev)
925 if base is not None:
934 if base is not None:
926 return base
935 return base
927
936
928 index = self.index
937 index = self.index
929 iterrev = rev
938 iterrev = rev
930 base = index[iterrev][3]
939 base = index[iterrev][3]
931 while base != iterrev:
940 while base != iterrev:
932 iterrev = base
941 iterrev = base
933 base = index[iterrev][3]
942 base = index[iterrev][3]
934
943
935 self._chainbasecache[rev] = base
944 self._chainbasecache[rev] = base
936 return base
945 return base
937
946
938 def linkrev(self, rev):
947 def linkrev(self, rev):
939 return self.index[rev][4]
948 return self.index[rev][4]
940
949
941 def parentrevs(self, rev):
950 def parentrevs(self, rev):
942 try:
951 try:
943 entry = self.index[rev]
952 entry = self.index[rev]
944 except IndexError:
953 except IndexError:
945 if rev == wdirrev:
954 if rev == wdirrev:
946 raise error.WdirUnsupported
955 raise error.WdirUnsupported
947 raise
956 raise
948 if entry[5] == nullrev:
957 if entry[5] == nullrev:
949 return entry[6], entry[5]
958 return entry[6], entry[5]
950 else:
959 else:
951 return entry[5], entry[6]
960 return entry[5], entry[6]
952
961
953 # fast parentrevs(rev) where rev isn't filtered
962 # fast parentrevs(rev) where rev isn't filtered
954 _uncheckedparentrevs = parentrevs
963 _uncheckedparentrevs = parentrevs
955
964
956 def node(self, rev):
965 def node(self, rev):
957 try:
966 try:
958 return self.index[rev][7]
967 return self.index[rev][7]
959 except IndexError:
968 except IndexError:
960 if rev == wdirrev:
969 if rev == wdirrev:
961 raise error.WdirUnsupported
970 raise error.WdirUnsupported
962 raise
971 raise
963
972
964 # Derived from index values.
973 # Derived from index values.
965
974
966 def end(self, rev):
975 def end(self, rev):
967 return self.start(rev) + self.length(rev)
976 return self.start(rev) + self.length(rev)
968
977
969 def parents(self, node):
978 def parents(self, node):
970 i = self.index
979 i = self.index
971 d = i[self.rev(node)]
980 d = i[self.rev(node)]
972 # inline node() to avoid function call overhead
981 # inline node() to avoid function call overhead
973 if d[5] == self.nullid:
982 if d[5] == self.nullid:
974 return i[d[6]][7], i[d[5]][7]
983 return i[d[6]][7], i[d[5]][7]
975 else:
984 else:
976 return i[d[5]][7], i[d[6]][7]
985 return i[d[5]][7], i[d[6]][7]
977
986
978 def chainlen(self, rev):
987 def chainlen(self, rev):
979 return self._chaininfo(rev)[0]
988 return self._chaininfo(rev)[0]
980
989
981 def _chaininfo(self, rev):
990 def _chaininfo(self, rev):
982 chaininfocache = self._chaininfocache
991 chaininfocache = self._chaininfocache
983 if rev in chaininfocache:
992 if rev in chaininfocache:
984 return chaininfocache[rev]
993 return chaininfocache[rev]
985 index = self.index
994 index = self.index
986 generaldelta = self._generaldelta
995 generaldelta = self._generaldelta
987 iterrev = rev
996 iterrev = rev
988 e = index[iterrev]
997 e = index[iterrev]
989 clen = 0
998 clen = 0
990 compresseddeltalen = 0
999 compresseddeltalen = 0
991 while iterrev != e[3]:
1000 while iterrev != e[3]:
992 clen += 1
1001 clen += 1
993 compresseddeltalen += e[1]
1002 compresseddeltalen += e[1]
994 if generaldelta:
1003 if generaldelta:
995 iterrev = e[3]
1004 iterrev = e[3]
996 else:
1005 else:
997 iterrev -= 1
1006 iterrev -= 1
998 if iterrev in chaininfocache:
1007 if iterrev in chaininfocache:
999 t = chaininfocache[iterrev]
1008 t = chaininfocache[iterrev]
1000 clen += t[0]
1009 clen += t[0]
1001 compresseddeltalen += t[1]
1010 compresseddeltalen += t[1]
1002 break
1011 break
1003 e = index[iterrev]
1012 e = index[iterrev]
1004 else:
1013 else:
1005 # Add text length of base since decompressing that also takes
1014 # Add text length of base since decompressing that also takes
1006 # work. For cache hits the length is already included.
1015 # work. For cache hits the length is already included.
1007 compresseddeltalen += e[1]
1016 compresseddeltalen += e[1]
1008 r = (clen, compresseddeltalen)
1017 r = (clen, compresseddeltalen)
1009 chaininfocache[rev] = r
1018 chaininfocache[rev] = r
1010 return r
1019 return r
1011
1020
1012 def _deltachain(self, rev, stoprev=None):
1021 def _deltachain(self, rev, stoprev=None):
1013 """Obtain the delta chain for a revision.
1022 """Obtain the delta chain for a revision.
1014
1023
1015 ``stoprev`` specifies a revision to stop at. If not specified, we
1024 ``stoprev`` specifies a revision to stop at. If not specified, we
1016 stop at the base of the chain.
1025 stop at the base of the chain.
1017
1026
1018 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
1027 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
1019 revs in ascending order and ``stopped`` is a bool indicating whether
1028 revs in ascending order and ``stopped`` is a bool indicating whether
1020 ``stoprev`` was hit.
1029 ``stoprev`` was hit.
1021 """
1030 """
1022 # Try C implementation.
1031 # Try C implementation.
1023 try:
1032 try:
1024 return self.index.deltachain(rev, stoprev, self._generaldelta)
1033 return self.index.deltachain(rev, stoprev, self._generaldelta)
1025 except AttributeError:
1034 except AttributeError:
1026 pass
1035 pass
1027
1036
1028 chain = []
1037 chain = []
1029
1038
1030 # Alias to prevent attribute lookup in tight loop.
1039 # Alias to prevent attribute lookup in tight loop.
1031 index = self.index
1040 index = self.index
1032 generaldelta = self._generaldelta
1041 generaldelta = self._generaldelta
1033
1042
1034 iterrev = rev
1043 iterrev = rev
1035 e = index[iterrev]
1044 e = index[iterrev]
1036 while iterrev != e[3] and iterrev != stoprev:
1045 while iterrev != e[3] and iterrev != stoprev:
1037 chain.append(iterrev)
1046 chain.append(iterrev)
1038 if generaldelta:
1047 if generaldelta:
1039 iterrev = e[3]
1048 iterrev = e[3]
1040 else:
1049 else:
1041 iterrev -= 1
1050 iterrev -= 1
1042 e = index[iterrev]
1051 e = index[iterrev]
1043
1052
1044 if iterrev == stoprev:
1053 if iterrev == stoprev:
1045 stopped = True
1054 stopped = True
1046 else:
1055 else:
1047 chain.append(iterrev)
1056 chain.append(iterrev)
1048 stopped = False
1057 stopped = False
1049
1058
1050 chain.reverse()
1059 chain.reverse()
1051 return chain, stopped
1060 return chain, stopped
1052
1061
1053 def ancestors(self, revs, stoprev=0, inclusive=False):
1062 def ancestors(self, revs, stoprev=0, inclusive=False):
1054 """Generate the ancestors of 'revs' in reverse revision order.
1063 """Generate the ancestors of 'revs' in reverse revision order.
1055 Does not generate revs lower than stoprev.
1064 Does not generate revs lower than stoprev.
1056
1065
1057 See the documentation for ancestor.lazyancestors for more details."""
1066 See the documentation for ancestor.lazyancestors for more details."""
1058
1067
1059 # first, make sure start revisions aren't filtered
1068 # first, make sure start revisions aren't filtered
1060 revs = list(revs)
1069 revs = list(revs)
1061 checkrev = self.node
1070 checkrev = self.node
1062 for r in revs:
1071 for r in revs:
1063 checkrev(r)
1072 checkrev(r)
1064 # and we're sure ancestors aren't filtered as well
1073 # and we're sure ancestors aren't filtered as well
1065
1074
1066 if rustancestor is not None and self.index.rust_ext_compat:
1075 if rustancestor is not None and self.index.rust_ext_compat:
1067 lazyancestors = rustancestor.LazyAncestors
1076 lazyancestors = rustancestor.LazyAncestors
1068 arg = self.index
1077 arg = self.index
1069 else:
1078 else:
1070 lazyancestors = ancestor.lazyancestors
1079 lazyancestors = ancestor.lazyancestors
1071 arg = self._uncheckedparentrevs
1080 arg = self._uncheckedparentrevs
1072 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1081 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1073
1082
1074 def descendants(self, revs):
1083 def descendants(self, revs):
1075 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1084 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1076
1085
1077 def findcommonmissing(self, common=None, heads=None):
1086 def findcommonmissing(self, common=None, heads=None):
1078 """Return a tuple of the ancestors of common and the ancestors of heads
1087 """Return a tuple of the ancestors of common and the ancestors of heads
1079 that are not ancestors of common. In revset terminology, we return the
1088 that are not ancestors of common. In revset terminology, we return the
1080 tuple:
1089 tuple:
1081
1090
1082 ::common, (::heads) - (::common)
1091 ::common, (::heads) - (::common)
1083
1092
1084 The list is sorted by revision number, meaning it is
1093 The list is sorted by revision number, meaning it is
1085 topologically sorted.
1094 topologically sorted.
1086
1095
1087 'heads' and 'common' are both lists of node IDs. If heads is
1096 'heads' and 'common' are both lists of node IDs. If heads is
1088 not supplied, uses all of the revlog's heads. If common is not
1097 not supplied, uses all of the revlog's heads. If common is not
1089 supplied, uses nullid."""
1098 supplied, uses nullid."""
1090 if common is None:
1099 if common is None:
1091 common = [self.nullid]
1100 common = [self.nullid]
1092 if heads is None:
1101 if heads is None:
1093 heads = self.heads()
1102 heads = self.heads()
1094
1103
1095 common = [self.rev(n) for n in common]
1104 common = [self.rev(n) for n in common]
1096 heads = [self.rev(n) for n in heads]
1105 heads = [self.rev(n) for n in heads]
1097
1106
1098 # we want the ancestors, but inclusive
1107 # we want the ancestors, but inclusive
1099 class lazyset(object):
1108 class lazyset(object):
1100 def __init__(self, lazyvalues):
1109 def __init__(self, lazyvalues):
1101 self.addedvalues = set()
1110 self.addedvalues = set()
1102 self.lazyvalues = lazyvalues
1111 self.lazyvalues = lazyvalues
1103
1112
1104 def __contains__(self, value):
1113 def __contains__(self, value):
1105 return value in self.addedvalues or value in self.lazyvalues
1114 return value in self.addedvalues or value in self.lazyvalues
1106
1115
1107 def __iter__(self):
1116 def __iter__(self):
1108 added = self.addedvalues
1117 added = self.addedvalues
1109 for r in added:
1118 for r in added:
1110 yield r
1119 yield r
1111 for r in self.lazyvalues:
1120 for r in self.lazyvalues:
1112 if not r in added:
1121 if not r in added:
1113 yield r
1122 yield r
1114
1123
1115 def add(self, value):
1124 def add(self, value):
1116 self.addedvalues.add(value)
1125 self.addedvalues.add(value)
1117
1126
1118 def update(self, values):
1127 def update(self, values):
1119 self.addedvalues.update(values)
1128 self.addedvalues.update(values)
1120
1129
1121 has = lazyset(self.ancestors(common))
1130 has = lazyset(self.ancestors(common))
1122 has.add(nullrev)
1131 has.add(nullrev)
1123 has.update(common)
1132 has.update(common)
1124
1133
1125 # take all ancestors from heads that aren't in has
1134 # take all ancestors from heads that aren't in has
1126 missing = set()
1135 missing = set()
1127 visit = collections.deque(r for r in heads if r not in has)
1136 visit = collections.deque(r for r in heads if r not in has)
1128 while visit:
1137 while visit:
1129 r = visit.popleft()
1138 r = visit.popleft()
1130 if r in missing:
1139 if r in missing:
1131 continue
1140 continue
1132 else:
1141 else:
1133 missing.add(r)
1142 missing.add(r)
1134 for p in self.parentrevs(r):
1143 for p in self.parentrevs(r):
1135 if p not in has:
1144 if p not in has:
1136 visit.append(p)
1145 visit.append(p)
1137 missing = list(missing)
1146 missing = list(missing)
1138 missing.sort()
1147 missing.sort()
1139 return has, [self.node(miss) for miss in missing]
1148 return has, [self.node(miss) for miss in missing]
1140
1149
1141 def incrementalmissingrevs(self, common=None):
1150 def incrementalmissingrevs(self, common=None):
1142 """Return an object that can be used to incrementally compute the
1151 """Return an object that can be used to incrementally compute the
1143 revision numbers of the ancestors of arbitrary sets that are not
1152 revision numbers of the ancestors of arbitrary sets that are not
1144 ancestors of common. This is an ancestor.incrementalmissingancestors
1153 ancestors of common. This is an ancestor.incrementalmissingancestors
1145 object.
1154 object.
1146
1155
1147 'common' is a list of revision numbers. If common is not supplied, uses
1156 'common' is a list of revision numbers. If common is not supplied, uses
1148 nullrev.
1157 nullrev.
1149 """
1158 """
1150 if common is None:
1159 if common is None:
1151 common = [nullrev]
1160 common = [nullrev]
1152
1161
1153 if rustancestor is not None and self.index.rust_ext_compat:
1162 if rustancestor is not None and self.index.rust_ext_compat:
1154 return rustancestor.MissingAncestors(self.index, common)
1163 return rustancestor.MissingAncestors(self.index, common)
1155 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1164 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1156
1165
1157 def findmissingrevs(self, common=None, heads=None):
1166 def findmissingrevs(self, common=None, heads=None):
1158 """Return the revision numbers of the ancestors of heads that
1167 """Return the revision numbers of the ancestors of heads that
1159 are not ancestors of common.
1168 are not ancestors of common.
1160
1169
1161 More specifically, return a list of revision numbers corresponding to
1170 More specifically, return a list of revision numbers corresponding to
1162 nodes N such that every N satisfies the following constraints:
1171 nodes N such that every N satisfies the following constraints:
1163
1172
1164 1. N is an ancestor of some node in 'heads'
1173 1. N is an ancestor of some node in 'heads'
1165 2. N is not an ancestor of any node in 'common'
1174 2. N is not an ancestor of any node in 'common'
1166
1175
1167 The list is sorted by revision number, meaning it is
1176 The list is sorted by revision number, meaning it is
1168 topologically sorted.
1177 topologically sorted.
1169
1178
1170 'heads' and 'common' are both lists of revision numbers. If heads is
1179 'heads' and 'common' are both lists of revision numbers. If heads is
1171 not supplied, uses all of the revlog's heads. If common is not
1180 not supplied, uses all of the revlog's heads. If common is not
1172 supplied, uses nullid."""
1181 supplied, uses nullid."""
1173 if common is None:
1182 if common is None:
1174 common = [nullrev]
1183 common = [nullrev]
1175 if heads is None:
1184 if heads is None:
1176 heads = self.headrevs()
1185 heads = self.headrevs()
1177
1186
1178 inc = self.incrementalmissingrevs(common=common)
1187 inc = self.incrementalmissingrevs(common=common)
1179 return inc.missingancestors(heads)
1188 return inc.missingancestors(heads)
1180
1189
1181 def findmissing(self, common=None, heads=None):
1190 def findmissing(self, common=None, heads=None):
1182 """Return the ancestors of heads that are not ancestors of common.
1191 """Return the ancestors of heads that are not ancestors of common.
1183
1192
1184 More specifically, return a list of nodes N such that every N
1193 More specifically, return a list of nodes N such that every N
1185 satisfies the following constraints:
1194 satisfies the following constraints:
1186
1195
1187 1. N is an ancestor of some node in 'heads'
1196 1. N is an ancestor of some node in 'heads'
1188 2. N is not an ancestor of any node in 'common'
1197 2. N is not an ancestor of any node in 'common'
1189
1198
1190 The list is sorted by revision number, meaning it is
1199 The list is sorted by revision number, meaning it is
1191 topologically sorted.
1200 topologically sorted.
1192
1201
1193 'heads' and 'common' are both lists of node IDs. If heads is
1202 'heads' and 'common' are both lists of node IDs. If heads is
1194 not supplied, uses all of the revlog's heads. If common is not
1203 not supplied, uses all of the revlog's heads. If common is not
1195 supplied, uses nullid."""
1204 supplied, uses nullid."""
1196 if common is None:
1205 if common is None:
1197 common = [self.nullid]
1206 common = [self.nullid]
1198 if heads is None:
1207 if heads is None:
1199 heads = self.heads()
1208 heads = self.heads()
1200
1209
1201 common = [self.rev(n) for n in common]
1210 common = [self.rev(n) for n in common]
1202 heads = [self.rev(n) for n in heads]
1211 heads = [self.rev(n) for n in heads]
1203
1212
1204 inc = self.incrementalmissingrevs(common=common)
1213 inc = self.incrementalmissingrevs(common=common)
1205 return [self.node(r) for r in inc.missingancestors(heads)]
1214 return [self.node(r) for r in inc.missingancestors(heads)]
1206
1215
1207 def nodesbetween(self, roots=None, heads=None):
1216 def nodesbetween(self, roots=None, heads=None):
1208 """Return a topological path from 'roots' to 'heads'.
1217 """Return a topological path from 'roots' to 'heads'.
1209
1218
1210 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1219 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1211 topologically sorted list of all nodes N that satisfy both of
1220 topologically sorted list of all nodes N that satisfy both of
1212 these constraints:
1221 these constraints:
1213
1222
1214 1. N is a descendant of some node in 'roots'
1223 1. N is a descendant of some node in 'roots'
1215 2. N is an ancestor of some node in 'heads'
1224 2. N is an ancestor of some node in 'heads'
1216
1225
1217 Every node is considered to be both a descendant and an ancestor
1226 Every node is considered to be both a descendant and an ancestor
1218 of itself, so every reachable node in 'roots' and 'heads' will be
1227 of itself, so every reachable node in 'roots' and 'heads' will be
1219 included in 'nodes'.
1228 included in 'nodes'.
1220
1229
1221 'outroots' is the list of reachable nodes in 'roots', i.e., the
1230 'outroots' is the list of reachable nodes in 'roots', i.e., the
1222 subset of 'roots' that is returned in 'nodes'. Likewise,
1231 subset of 'roots' that is returned in 'nodes'. Likewise,
1223 'outheads' is the subset of 'heads' that is also in 'nodes'.
1232 'outheads' is the subset of 'heads' that is also in 'nodes'.
1224
1233
1225 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1234 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1226 unspecified, uses nullid as the only root. If 'heads' is
1235 unspecified, uses nullid as the only root. If 'heads' is
1227 unspecified, uses list of all of the revlog's heads."""
1236 unspecified, uses list of all of the revlog's heads."""
1228 nonodes = ([], [], [])
1237 nonodes = ([], [], [])
1229 if roots is not None:
1238 if roots is not None:
1230 roots = list(roots)
1239 roots = list(roots)
1231 if not roots:
1240 if not roots:
1232 return nonodes
1241 return nonodes
1233 lowestrev = min([self.rev(n) for n in roots])
1242 lowestrev = min([self.rev(n) for n in roots])
1234 else:
1243 else:
1235 roots = [self.nullid] # Everybody's a descendant of nullid
1244 roots = [self.nullid] # Everybody's a descendant of nullid
1236 lowestrev = nullrev
1245 lowestrev = nullrev
1237 if (lowestrev == nullrev) and (heads is None):
1246 if (lowestrev == nullrev) and (heads is None):
1238 # We want _all_ the nodes!
1247 # We want _all_ the nodes!
1239 return (
1248 return (
1240 [self.node(r) for r in self],
1249 [self.node(r) for r in self],
1241 [self.nullid],
1250 [self.nullid],
1242 list(self.heads()),
1251 list(self.heads()),
1243 )
1252 )
1244 if heads is None:
1253 if heads is None:
1245 # All nodes are ancestors, so the latest ancestor is the last
1254 # All nodes are ancestors, so the latest ancestor is the last
1246 # node.
1255 # node.
1247 highestrev = len(self) - 1
1256 highestrev = len(self) - 1
1248 # Set ancestors to None to signal that every node is an ancestor.
1257 # Set ancestors to None to signal that every node is an ancestor.
1249 ancestors = None
1258 ancestors = None
1250 # Set heads to an empty dictionary for later discovery of heads
1259 # Set heads to an empty dictionary for later discovery of heads
1251 heads = {}
1260 heads = {}
1252 else:
1261 else:
1253 heads = list(heads)
1262 heads = list(heads)
1254 if not heads:
1263 if not heads:
1255 return nonodes
1264 return nonodes
1256 ancestors = set()
1265 ancestors = set()
1257 # Turn heads into a dictionary so we can remove 'fake' heads.
1266 # Turn heads into a dictionary so we can remove 'fake' heads.
1258 # Also, later we will be using it to filter out the heads we can't
1267 # Also, later we will be using it to filter out the heads we can't
1259 # find from roots.
1268 # find from roots.
1260 heads = dict.fromkeys(heads, False)
1269 heads = dict.fromkeys(heads, False)
1261 # Start at the top and keep marking parents until we're done.
1270 # Start at the top and keep marking parents until we're done.
1262 nodestotag = set(heads)
1271 nodestotag = set(heads)
1263 # Remember where the top was so we can use it as a limit later.
1272 # Remember where the top was so we can use it as a limit later.
1264 highestrev = max([self.rev(n) for n in nodestotag])
1273 highestrev = max([self.rev(n) for n in nodestotag])
1265 while nodestotag:
1274 while nodestotag:
1266 # grab a node to tag
1275 # grab a node to tag
1267 n = nodestotag.pop()
1276 n = nodestotag.pop()
1268 # Never tag nullid
1277 # Never tag nullid
1269 if n == self.nullid:
1278 if n == self.nullid:
1270 continue
1279 continue
1271 # A node's revision number represents its place in a
1280 # A node's revision number represents its place in a
1272 # topologically sorted list of nodes.
1281 # topologically sorted list of nodes.
1273 r = self.rev(n)
1282 r = self.rev(n)
1274 if r >= lowestrev:
1283 if r >= lowestrev:
1275 if n not in ancestors:
1284 if n not in ancestors:
1276 # If we are possibly a descendant of one of the roots
1285 # If we are possibly a descendant of one of the roots
1277 # and we haven't already been marked as an ancestor
1286 # and we haven't already been marked as an ancestor
1278 ancestors.add(n) # Mark as ancestor
1287 ancestors.add(n) # Mark as ancestor
1279 # Add non-nullid parents to list of nodes to tag.
1288 # Add non-nullid parents to list of nodes to tag.
1280 nodestotag.update(
1289 nodestotag.update(
1281 [p for p in self.parents(n) if p != self.nullid]
1290 [p for p in self.parents(n) if p != self.nullid]
1282 )
1291 )
1283 elif n in heads: # We've seen it before, is it a fake head?
1292 elif n in heads: # We've seen it before, is it a fake head?
1284 # So it is, real heads should not be the ancestors of
1293 # So it is, real heads should not be the ancestors of
1285 # any other heads.
1294 # any other heads.
1286 heads.pop(n)
1295 heads.pop(n)
1287 if not ancestors:
1296 if not ancestors:
1288 return nonodes
1297 return nonodes
1289 # Now that we have our set of ancestors, we want to remove any
1298 # Now that we have our set of ancestors, we want to remove any
1290 # roots that are not ancestors.
1299 # roots that are not ancestors.
1291
1300
1292 # If one of the roots was nullid, everything is included anyway.
1301 # If one of the roots was nullid, everything is included anyway.
1293 if lowestrev > nullrev:
1302 if lowestrev > nullrev:
1294 # But, since we weren't, let's recompute the lowest rev to not
1303 # But, since we weren't, let's recompute the lowest rev to not
1295 # include roots that aren't ancestors.
1304 # include roots that aren't ancestors.
1296
1305
1297 # Filter out roots that aren't ancestors of heads
1306 # Filter out roots that aren't ancestors of heads
1298 roots = [root for root in roots if root in ancestors]
1307 roots = [root for root in roots if root in ancestors]
1299 # Recompute the lowest revision
1308 # Recompute the lowest revision
1300 if roots:
1309 if roots:
1301 lowestrev = min([self.rev(root) for root in roots])
1310 lowestrev = min([self.rev(root) for root in roots])
1302 else:
1311 else:
1303 # No more roots? Return empty list
1312 # No more roots? Return empty list
1304 return nonodes
1313 return nonodes
1305 else:
1314 else:
1306 # We are descending from nullid, and don't need to care about
1315 # We are descending from nullid, and don't need to care about
1307 # any other roots.
1316 # any other roots.
1308 lowestrev = nullrev
1317 lowestrev = nullrev
1309 roots = [self.nullid]
1318 roots = [self.nullid]
1310 # Transform our roots list into a set.
1319 # Transform our roots list into a set.
1311 descendants = set(roots)
1320 descendants = set(roots)
1312 # Also, keep the original roots so we can filter out roots that aren't
1321 # Also, keep the original roots so we can filter out roots that aren't
1313 # 'real' roots (i.e. are descended from other roots).
1322 # 'real' roots (i.e. are descended from other roots).
1314 roots = descendants.copy()
1323 roots = descendants.copy()
1315 # Our topologically sorted list of output nodes.
1324 # Our topologically sorted list of output nodes.
1316 orderedout = []
1325 orderedout = []
1317 # Don't start at nullid since we don't want nullid in our output list,
1326 # Don't start at nullid since we don't want nullid in our output list,
1318 # and if nullid shows up in descendants, empty parents will look like
1327 # and if nullid shows up in descendants, empty parents will look like
1319 # they're descendants.
1328 # they're descendants.
1320 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1329 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1321 n = self.node(r)
1330 n = self.node(r)
1322 isdescendant = False
1331 isdescendant = False
1323 if lowestrev == nullrev: # Everybody is a descendant of nullid
1332 if lowestrev == nullrev: # Everybody is a descendant of nullid
1324 isdescendant = True
1333 isdescendant = True
1325 elif n in descendants:
1334 elif n in descendants:
1326 # n is already a descendant
1335 # n is already a descendant
1327 isdescendant = True
1336 isdescendant = True
1328 # This check only needs to be done here because all the roots
1337 # This check only needs to be done here because all the roots
1329 # will start being marked is descendants before the loop.
1338 # will start being marked is descendants before the loop.
1330 if n in roots:
1339 if n in roots:
1331 # If n was a root, check if it's a 'real' root.
1340 # If n was a root, check if it's a 'real' root.
1332 p = tuple(self.parents(n))
1341 p = tuple(self.parents(n))
1333 # If any of its parents are descendants, it's not a root.
1342 # If any of its parents are descendants, it's not a root.
1334 if (p[0] in descendants) or (p[1] in descendants):
1343 if (p[0] in descendants) or (p[1] in descendants):
1335 roots.remove(n)
1344 roots.remove(n)
1336 else:
1345 else:
1337 p = tuple(self.parents(n))
1346 p = tuple(self.parents(n))
1338 # A node is a descendant if either of its parents are
1347 # A node is a descendant if either of its parents are
1339 # descendants. (We seeded the dependents list with the roots
1348 # descendants. (We seeded the dependents list with the roots
1340 # up there, remember?)
1349 # up there, remember?)
1341 if (p[0] in descendants) or (p[1] in descendants):
1350 if (p[0] in descendants) or (p[1] in descendants):
1342 descendants.add(n)
1351 descendants.add(n)
1343 isdescendant = True
1352 isdescendant = True
1344 if isdescendant and ((ancestors is None) or (n in ancestors)):
1353 if isdescendant and ((ancestors is None) or (n in ancestors)):
1345 # Only include nodes that are both descendants and ancestors.
1354 # Only include nodes that are both descendants and ancestors.
1346 orderedout.append(n)
1355 orderedout.append(n)
1347 if (ancestors is not None) and (n in heads):
1356 if (ancestors is not None) and (n in heads):
1348 # We're trying to figure out which heads are reachable
1357 # We're trying to figure out which heads are reachable
1349 # from roots.
1358 # from roots.
1350 # Mark this head as having been reached
1359 # Mark this head as having been reached
1351 heads[n] = True
1360 heads[n] = True
1352 elif ancestors is None:
1361 elif ancestors is None:
1353 # Otherwise, we're trying to discover the heads.
1362 # Otherwise, we're trying to discover the heads.
1354 # Assume this is a head because if it isn't, the next step
1363 # Assume this is a head because if it isn't, the next step
1355 # will eventually remove it.
1364 # will eventually remove it.
1356 heads[n] = True
1365 heads[n] = True
1357 # But, obviously its parents aren't.
1366 # But, obviously its parents aren't.
1358 for p in self.parents(n):
1367 for p in self.parents(n):
1359 heads.pop(p, None)
1368 heads.pop(p, None)
1360 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1369 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1361 roots = list(roots)
1370 roots = list(roots)
1362 assert orderedout
1371 assert orderedout
1363 assert roots
1372 assert roots
1364 assert heads
1373 assert heads
1365 return (orderedout, roots, heads)
1374 return (orderedout, roots, heads)
1366
1375
1367 def headrevs(self, revs=None):
1376 def headrevs(self, revs=None):
1368 if revs is None:
1377 if revs is None:
1369 try:
1378 try:
1370 return self.index.headrevs()
1379 return self.index.headrevs()
1371 except AttributeError:
1380 except AttributeError:
1372 return self._headrevs()
1381 return self._headrevs()
1373 if rustdagop is not None and self.index.rust_ext_compat:
1382 if rustdagop is not None and self.index.rust_ext_compat:
1374 return rustdagop.headrevs(self.index, revs)
1383 return rustdagop.headrevs(self.index, revs)
1375 return dagop.headrevs(revs, self._uncheckedparentrevs)
1384 return dagop.headrevs(revs, self._uncheckedparentrevs)
1376
1385
1377 def computephases(self, roots):
1386 def computephases(self, roots):
1378 return self.index.computephasesmapsets(roots)
1387 return self.index.computephasesmapsets(roots)
1379
1388
1380 def _headrevs(self):
1389 def _headrevs(self):
1381 count = len(self)
1390 count = len(self)
1382 if not count:
1391 if not count:
1383 return [nullrev]
1392 return [nullrev]
1384 # we won't iter over filtered rev so nobody is a head at start
1393 # we won't iter over filtered rev so nobody is a head at start
1385 ishead = [0] * (count + 1)
1394 ishead = [0] * (count + 1)
1386 index = self.index
1395 index = self.index
1387 for r in self:
1396 for r in self:
1388 ishead[r] = 1 # I may be an head
1397 ishead[r] = 1 # I may be an head
1389 e = index[r]
1398 e = index[r]
1390 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1399 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1391 return [r for r, val in enumerate(ishead) if val]
1400 return [r for r, val in enumerate(ishead) if val]
1392
1401
1393 def heads(self, start=None, stop=None):
1402 def heads(self, start=None, stop=None):
1394 """return the list of all nodes that have no children
1403 """return the list of all nodes that have no children
1395
1404
1396 if start is specified, only heads that are descendants of
1405 if start is specified, only heads that are descendants of
1397 start will be returned
1406 start will be returned
1398 if stop is specified, it will consider all the revs from stop
1407 if stop is specified, it will consider all the revs from stop
1399 as if they had no children
1408 as if they had no children
1400 """
1409 """
1401 if start is None and stop is None:
1410 if start is None and stop is None:
1402 if not len(self):
1411 if not len(self):
1403 return [self.nullid]
1412 return [self.nullid]
1404 return [self.node(r) for r in self.headrevs()]
1413 return [self.node(r) for r in self.headrevs()]
1405
1414
1406 if start is None:
1415 if start is None:
1407 start = nullrev
1416 start = nullrev
1408 else:
1417 else:
1409 start = self.rev(start)
1418 start = self.rev(start)
1410
1419
1411 stoprevs = {self.rev(n) for n in stop or []}
1420 stoprevs = {self.rev(n) for n in stop or []}
1412
1421
1413 revs = dagop.headrevssubset(
1422 revs = dagop.headrevssubset(
1414 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1423 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1415 )
1424 )
1416
1425
1417 return [self.node(rev) for rev in revs]
1426 return [self.node(rev) for rev in revs]
1418
1427
1419 def children(self, node):
1428 def children(self, node):
1420 """find the children of a given node"""
1429 """find the children of a given node"""
1421 c = []
1430 c = []
1422 p = self.rev(node)
1431 p = self.rev(node)
1423 for r in self.revs(start=p + 1):
1432 for r in self.revs(start=p + 1):
1424 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1433 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1425 if prevs:
1434 if prevs:
1426 for pr in prevs:
1435 for pr in prevs:
1427 if pr == p:
1436 if pr == p:
1428 c.append(self.node(r))
1437 c.append(self.node(r))
1429 elif p == nullrev:
1438 elif p == nullrev:
1430 c.append(self.node(r))
1439 c.append(self.node(r))
1431 return c
1440 return c
1432
1441
1433 def commonancestorsheads(self, a, b):
1442 def commonancestorsheads(self, a, b):
1434 """calculate all the heads of the common ancestors of nodes a and b"""
1443 """calculate all the heads of the common ancestors of nodes a and b"""
1435 a, b = self.rev(a), self.rev(b)
1444 a, b = self.rev(a), self.rev(b)
1436 ancs = self._commonancestorsheads(a, b)
1445 ancs = self._commonancestorsheads(a, b)
1437 return pycompat.maplist(self.node, ancs)
1446 return pycompat.maplist(self.node, ancs)
1438
1447
1439 def _commonancestorsheads(self, *revs):
1448 def _commonancestorsheads(self, *revs):
1440 """calculate all the heads of the common ancestors of revs"""
1449 """calculate all the heads of the common ancestors of revs"""
1441 try:
1450 try:
1442 ancs = self.index.commonancestorsheads(*revs)
1451 ancs = self.index.commonancestorsheads(*revs)
1443 except (AttributeError, OverflowError): # C implementation failed
1452 except (AttributeError, OverflowError): # C implementation failed
1444 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1453 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1445 return ancs
1454 return ancs
1446
1455
1447 def isancestor(self, a, b):
1456 def isancestor(self, a, b):
1448 """return True if node a is an ancestor of node b
1457 """return True if node a is an ancestor of node b
1449
1458
1450 A revision is considered an ancestor of itself."""
1459 A revision is considered an ancestor of itself."""
1451 a, b = self.rev(a), self.rev(b)
1460 a, b = self.rev(a), self.rev(b)
1452 return self.isancestorrev(a, b)
1461 return self.isancestorrev(a, b)
1453
1462
1454 def isancestorrev(self, a, b):
1463 def isancestorrev(self, a, b):
1455 """return True if revision a is an ancestor of revision b
1464 """return True if revision a is an ancestor of revision b
1456
1465
1457 A revision is considered an ancestor of itself.
1466 A revision is considered an ancestor of itself.
1458
1467
1459 The implementation of this is trivial but the use of
1468 The implementation of this is trivial but the use of
1460 reachableroots is not."""
1469 reachableroots is not."""
1461 if a == nullrev:
1470 if a == nullrev:
1462 return True
1471 return True
1463 elif a == b:
1472 elif a == b:
1464 return True
1473 return True
1465 elif a > b:
1474 elif a > b:
1466 return False
1475 return False
1467 return bool(self.reachableroots(a, [b], [a], includepath=False))
1476 return bool(self.reachableroots(a, [b], [a], includepath=False))
1468
1477
1469 def reachableroots(self, minroot, heads, roots, includepath=False):
1478 def reachableroots(self, minroot, heads, roots, includepath=False):
1470 """return (heads(::(<roots> and <roots>::<heads>)))
1479 """return (heads(::(<roots> and <roots>::<heads>)))
1471
1480
1472 If includepath is True, return (<roots>::<heads>)."""
1481 If includepath is True, return (<roots>::<heads>)."""
1473 try:
1482 try:
1474 return self.index.reachableroots2(
1483 return self.index.reachableroots2(
1475 minroot, heads, roots, includepath
1484 minroot, heads, roots, includepath
1476 )
1485 )
1477 except AttributeError:
1486 except AttributeError:
1478 return dagop._reachablerootspure(
1487 return dagop._reachablerootspure(
1479 self.parentrevs, minroot, roots, heads, includepath
1488 self.parentrevs, minroot, roots, heads, includepath
1480 )
1489 )
1481
1490
1482 def ancestor(self, a, b):
1491 def ancestor(self, a, b):
1483 """calculate the "best" common ancestor of nodes a and b"""
1492 """calculate the "best" common ancestor of nodes a and b"""
1484
1493
1485 a, b = self.rev(a), self.rev(b)
1494 a, b = self.rev(a), self.rev(b)
1486 try:
1495 try:
1487 ancs = self.index.ancestors(a, b)
1496 ancs = self.index.ancestors(a, b)
1488 except (AttributeError, OverflowError):
1497 except (AttributeError, OverflowError):
1489 ancs = ancestor.ancestors(self.parentrevs, a, b)
1498 ancs = ancestor.ancestors(self.parentrevs, a, b)
1490 if ancs:
1499 if ancs:
1491 # choose a consistent winner when there's a tie
1500 # choose a consistent winner when there's a tie
1492 return min(map(self.node, ancs))
1501 return min(map(self.node, ancs))
1493 return self.nullid
1502 return self.nullid
1494
1503
1495 def _match(self, id):
1504 def _match(self, id):
1496 if isinstance(id, int):
1505 if isinstance(id, int):
1497 # rev
1506 # rev
1498 return self.node(id)
1507 return self.node(id)
1499 if len(id) == self.nodeconstants.nodelen:
1508 if len(id) == self.nodeconstants.nodelen:
1500 # possibly a binary node
1509 # possibly a binary node
1501 # odds of a binary node being all hex in ASCII are 1 in 10**25
1510 # odds of a binary node being all hex in ASCII are 1 in 10**25
1502 try:
1511 try:
1503 node = id
1512 node = id
1504 self.rev(node) # quick search the index
1513 self.rev(node) # quick search the index
1505 return node
1514 return node
1506 except error.LookupError:
1515 except error.LookupError:
1507 pass # may be partial hex id
1516 pass # may be partial hex id
1508 try:
1517 try:
1509 # str(rev)
1518 # str(rev)
1510 rev = int(id)
1519 rev = int(id)
1511 if b"%d" % rev != id:
1520 if b"%d" % rev != id:
1512 raise ValueError
1521 raise ValueError
1513 if rev < 0:
1522 if rev < 0:
1514 rev = len(self) + rev
1523 rev = len(self) + rev
1515 if rev < 0 or rev >= len(self):
1524 if rev < 0 or rev >= len(self):
1516 raise ValueError
1525 raise ValueError
1517 return self.node(rev)
1526 return self.node(rev)
1518 except (ValueError, OverflowError):
1527 except (ValueError, OverflowError):
1519 pass
1528 pass
1520 if len(id) == 2 * self.nodeconstants.nodelen:
1529 if len(id) == 2 * self.nodeconstants.nodelen:
1521 try:
1530 try:
1522 # a full hex nodeid?
1531 # a full hex nodeid?
1523 node = bin(id)
1532 node = bin(id)
1524 self.rev(node)
1533 self.rev(node)
1525 return node
1534 return node
1526 except (TypeError, error.LookupError):
1535 except (TypeError, error.LookupError):
1527 pass
1536 pass
1528
1537
1529 def _partialmatch(self, id):
1538 def _partialmatch(self, id):
1530 # we don't care wdirfilenodeids as they should be always full hash
1539 # we don't care wdirfilenodeids as they should be always full hash
1531 maybewdir = self.nodeconstants.wdirhex.startswith(id)
1540 maybewdir = self.nodeconstants.wdirhex.startswith(id)
1532 try:
1541 try:
1533 partial = self.index.partialmatch(id)
1542 partial = self.index.partialmatch(id)
1534 if partial and self.hasnode(partial):
1543 if partial and self.hasnode(partial):
1535 if maybewdir:
1544 if maybewdir:
1536 # single 'ff...' match in radix tree, ambiguous with wdir
1545 # single 'ff...' match in radix tree, ambiguous with wdir
1537 raise error.RevlogError
1546 raise error.RevlogError
1538 return partial
1547 return partial
1539 if maybewdir:
1548 if maybewdir:
1540 # no 'ff...' match in radix tree, wdir identified
1549 # no 'ff...' match in radix tree, wdir identified
1541 raise error.WdirUnsupported
1550 raise error.WdirUnsupported
1542 return None
1551 return None
1543 except error.RevlogError:
1552 except error.RevlogError:
1544 # parsers.c radix tree lookup gave multiple matches
1553 # parsers.c radix tree lookup gave multiple matches
1545 # fast path: for unfiltered changelog, radix tree is accurate
1554 # fast path: for unfiltered changelog, radix tree is accurate
1546 if not getattr(self, 'filteredrevs', None):
1555 if not getattr(self, 'filteredrevs', None):
1547 raise error.AmbiguousPrefixLookupError(
1556 raise error.AmbiguousPrefixLookupError(
1548 id, self.display_id, _(b'ambiguous identifier')
1557 id, self.display_id, _(b'ambiguous identifier')
1549 )
1558 )
1550 # fall through to slow path that filters hidden revisions
1559 # fall through to slow path that filters hidden revisions
1551 except (AttributeError, ValueError):
1560 except (AttributeError, ValueError):
1552 # we are pure python, or key was too short to search radix tree
1561 # we are pure python, or key was too short to search radix tree
1553 pass
1562 pass
1554
1563
1555 if id in self._pcache:
1564 if id in self._pcache:
1556 return self._pcache[id]
1565 return self._pcache[id]
1557
1566
1558 if len(id) <= 40:
1567 if len(id) <= 40:
1559 try:
1568 try:
1560 # hex(node)[:...]
1569 # hex(node)[:...]
1561 l = len(id) // 2 # grab an even number of digits
1570 l = len(id) // 2 # grab an even number of digits
1562 prefix = bin(id[: l * 2])
1571 prefix = bin(id[: l * 2])
1563 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1572 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1564 nl = [
1573 nl = [
1565 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1574 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1566 ]
1575 ]
1567 if self.nodeconstants.nullhex.startswith(id):
1576 if self.nodeconstants.nullhex.startswith(id):
1568 nl.append(self.nullid)
1577 nl.append(self.nullid)
1569 if len(nl) > 0:
1578 if len(nl) > 0:
1570 if len(nl) == 1 and not maybewdir:
1579 if len(nl) == 1 and not maybewdir:
1571 self._pcache[id] = nl[0]
1580 self._pcache[id] = nl[0]
1572 return nl[0]
1581 return nl[0]
1573 raise error.AmbiguousPrefixLookupError(
1582 raise error.AmbiguousPrefixLookupError(
1574 id, self.display_id, _(b'ambiguous identifier')
1583 id, self.display_id, _(b'ambiguous identifier')
1575 )
1584 )
1576 if maybewdir:
1585 if maybewdir:
1577 raise error.WdirUnsupported
1586 raise error.WdirUnsupported
1578 return None
1587 return None
1579 except TypeError:
1588 except TypeError:
1580 pass
1589 pass
1581
1590
1582 def lookup(self, id):
1591 def lookup(self, id):
1583 """locate a node based on:
1592 """locate a node based on:
1584 - revision number or str(revision number)
1593 - revision number or str(revision number)
1585 - nodeid or subset of hex nodeid
1594 - nodeid or subset of hex nodeid
1586 """
1595 """
1587 n = self._match(id)
1596 n = self._match(id)
1588 if n is not None:
1597 if n is not None:
1589 return n
1598 return n
1590 n = self._partialmatch(id)
1599 n = self._partialmatch(id)
1591 if n:
1600 if n:
1592 return n
1601 return n
1593
1602
1594 raise error.LookupError(id, self.display_id, _(b'no match found'))
1603 raise error.LookupError(id, self.display_id, _(b'no match found'))
1595
1604
1596 def shortest(self, node, minlength=1):
1605 def shortest(self, node, minlength=1):
1597 """Find the shortest unambiguous prefix that matches node."""
1606 """Find the shortest unambiguous prefix that matches node."""
1598
1607
1599 def isvalid(prefix):
1608 def isvalid(prefix):
1600 try:
1609 try:
1601 matchednode = self._partialmatch(prefix)
1610 matchednode = self._partialmatch(prefix)
1602 except error.AmbiguousPrefixLookupError:
1611 except error.AmbiguousPrefixLookupError:
1603 return False
1612 return False
1604 except error.WdirUnsupported:
1613 except error.WdirUnsupported:
1605 # single 'ff...' match
1614 # single 'ff...' match
1606 return True
1615 return True
1607 if matchednode is None:
1616 if matchednode is None:
1608 raise error.LookupError(node, self.display_id, _(b'no node'))
1617 raise error.LookupError(node, self.display_id, _(b'no node'))
1609 return True
1618 return True
1610
1619
1611 def maybewdir(prefix):
1620 def maybewdir(prefix):
1612 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1621 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1613
1622
1614 hexnode = hex(node)
1623 hexnode = hex(node)
1615
1624
1616 def disambiguate(hexnode, minlength):
1625 def disambiguate(hexnode, minlength):
1617 """Disambiguate against wdirid."""
1626 """Disambiguate against wdirid."""
1618 for length in range(minlength, len(hexnode) + 1):
1627 for length in range(minlength, len(hexnode) + 1):
1619 prefix = hexnode[:length]
1628 prefix = hexnode[:length]
1620 if not maybewdir(prefix):
1629 if not maybewdir(prefix):
1621 return prefix
1630 return prefix
1622
1631
1623 if not getattr(self, 'filteredrevs', None):
1632 if not getattr(self, 'filteredrevs', None):
1624 try:
1633 try:
1625 length = max(self.index.shortest(node), minlength)
1634 length = max(self.index.shortest(node), minlength)
1626 return disambiguate(hexnode, length)
1635 return disambiguate(hexnode, length)
1627 except error.RevlogError:
1636 except error.RevlogError:
1628 if node != self.nodeconstants.wdirid:
1637 if node != self.nodeconstants.wdirid:
1629 raise error.LookupError(
1638 raise error.LookupError(
1630 node, self.display_id, _(b'no node')
1639 node, self.display_id, _(b'no node')
1631 )
1640 )
1632 except AttributeError:
1641 except AttributeError:
1633 # Fall through to pure code
1642 # Fall through to pure code
1634 pass
1643 pass
1635
1644
1636 if node == self.nodeconstants.wdirid:
1645 if node == self.nodeconstants.wdirid:
1637 for length in range(minlength, len(hexnode) + 1):
1646 for length in range(minlength, len(hexnode) + 1):
1638 prefix = hexnode[:length]
1647 prefix = hexnode[:length]
1639 if isvalid(prefix):
1648 if isvalid(prefix):
1640 return prefix
1649 return prefix
1641
1650
1642 for length in range(minlength, len(hexnode) + 1):
1651 for length in range(minlength, len(hexnode) + 1):
1643 prefix = hexnode[:length]
1652 prefix = hexnode[:length]
1644 if isvalid(prefix):
1653 if isvalid(prefix):
1645 return disambiguate(hexnode, length)
1654 return disambiguate(hexnode, length)
1646
1655
1647 def cmp(self, node, text):
1656 def cmp(self, node, text):
1648 """compare text with a given file revision
1657 """compare text with a given file revision
1649
1658
1650 returns True if text is different than what is stored.
1659 returns True if text is different than what is stored.
1651 """
1660 """
1652 p1, p2 = self.parents(node)
1661 p1, p2 = self.parents(node)
1653 return storageutil.hashrevisionsha1(text, p1, p2) != node
1662 return storageutil.hashrevisionsha1(text, p1, p2) != node
1654
1663
1655 def _cachesegment(self, offset, data):
1664 def _cachesegment(self, offset, data):
1656 """Add a segment to the revlog cache.
1665 """Add a segment to the revlog cache.
1657
1666
1658 Accepts an absolute offset and the data that is at that location.
1667 Accepts an absolute offset and the data that is at that location.
1659 """
1668 """
1660 o, d = self._chunkcache
1669 o, d = self._chunkcache
1661 # try to add to existing cache
1670 # try to add to existing cache
1662 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1671 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1663 self._chunkcache = o, d + data
1672 self._chunkcache = o, d + data
1664 else:
1673 else:
1665 self._chunkcache = offset, data
1674 self._chunkcache = offset, data
1666
1675
1667 def _readsegment(self, offset, length, df=None):
1676 def _readsegment(self, offset, length, df=None):
1668 """Load a segment of raw data from the revlog.
1677 """Load a segment of raw data from the revlog.
1669
1678
1670 Accepts an absolute offset, length to read, and an optional existing
1679 Accepts an absolute offset, length to read, and an optional existing
1671 file handle to read from.
1680 file handle to read from.
1672
1681
1673 If an existing file handle is passed, it will be seeked and the
1682 If an existing file handle is passed, it will be seeked and the
1674 original seek position will NOT be restored.
1683 original seek position will NOT be restored.
1675
1684
1676 Returns a str or buffer of raw byte data.
1685 Returns a str or buffer of raw byte data.
1677
1686
1678 Raises if the requested number of bytes could not be read.
1687 Raises if the requested number of bytes could not be read.
1679 """
1688 """
1680 # Cache data both forward and backward around the requested
1689 # Cache data both forward and backward around the requested
1681 # data, in a fixed size window. This helps speed up operations
1690 # data, in a fixed size window. This helps speed up operations
1682 # involving reading the revlog backwards.
1691 # involving reading the revlog backwards.
1683 cachesize = self._chunkcachesize
1692 cachesize = self._chunkcachesize
1684 realoffset = offset & ~(cachesize - 1)
1693 realoffset = offset & ~(cachesize - 1)
1685 reallength = (
1694 reallength = (
1686 (offset + length + cachesize) & ~(cachesize - 1)
1695 (offset + length + cachesize) & ~(cachesize - 1)
1687 ) - realoffset
1696 ) - realoffset
1688 with self._datareadfp(df) as df:
1697 with self._datareadfp(df) as df:
1689 df.seek(realoffset)
1698 df.seek(realoffset)
1690 d = df.read(reallength)
1699 d = df.read(reallength)
1691
1700
1692 self._cachesegment(realoffset, d)
1701 self._cachesegment(realoffset, d)
1693 if offset != realoffset or reallength != length:
1702 if offset != realoffset or reallength != length:
1694 startoffset = offset - realoffset
1703 startoffset = offset - realoffset
1695 if len(d) - startoffset < length:
1704 if len(d) - startoffset < length:
1696 raise error.RevlogError(
1705 raise error.RevlogError(
1697 _(
1706 _(
1698 b'partial read of revlog %s; expected %d bytes from '
1707 b'partial read of revlog %s; expected %d bytes from '
1699 b'offset %d, got %d'
1708 b'offset %d, got %d'
1700 )
1709 )
1701 % (
1710 % (
1702 self._indexfile if self._inline else self._datafile,
1711 self._indexfile if self._inline else self._datafile,
1703 length,
1712 length,
1704 offset,
1713 offset,
1705 len(d) - startoffset,
1714 len(d) - startoffset,
1706 )
1715 )
1707 )
1716 )
1708
1717
1709 return util.buffer(d, startoffset, length)
1718 return util.buffer(d, startoffset, length)
1710
1719
1711 if len(d) < length:
1720 if len(d) < length:
1712 raise error.RevlogError(
1721 raise error.RevlogError(
1713 _(
1722 _(
1714 b'partial read of revlog %s; expected %d bytes from offset '
1723 b'partial read of revlog %s; expected %d bytes from offset '
1715 b'%d, got %d'
1724 b'%d, got %d'
1716 )
1725 )
1717 % (
1726 % (
1718 self._indexfile if self._inline else self._datafile,
1727 self._indexfile if self._inline else self._datafile,
1719 length,
1728 length,
1720 offset,
1729 offset,
1721 len(d),
1730 len(d),
1722 )
1731 )
1723 )
1732 )
1724
1733
1725 return d
1734 return d
1726
1735
1727 def _getsegment(self, offset, length, df=None):
1736 def _getsegment(self, offset, length, df=None):
1728 """Obtain a segment of raw data from the revlog.
1737 """Obtain a segment of raw data from the revlog.
1729
1738
1730 Accepts an absolute offset, length of bytes to obtain, and an
1739 Accepts an absolute offset, length of bytes to obtain, and an
1731 optional file handle to the already-opened revlog. If the file
1740 optional file handle to the already-opened revlog. If the file
1732 handle is used, it's original seek position will not be preserved.
1741 handle is used, it's original seek position will not be preserved.
1733
1742
1734 Requests for data may be returned from a cache.
1743 Requests for data may be returned from a cache.
1735
1744
1736 Returns a str or a buffer instance of raw byte data.
1745 Returns a str or a buffer instance of raw byte data.
1737 """
1746 """
1738 o, d = self._chunkcache
1747 o, d = self._chunkcache
1739 l = len(d)
1748 l = len(d)
1740
1749
1741 # is it in the cache?
1750 # is it in the cache?
1742 cachestart = offset - o
1751 cachestart = offset - o
1743 cacheend = cachestart + length
1752 cacheend = cachestart + length
1744 if cachestart >= 0 and cacheend <= l:
1753 if cachestart >= 0 and cacheend <= l:
1745 if cachestart == 0 and cacheend == l:
1754 if cachestart == 0 and cacheend == l:
1746 return d # avoid a copy
1755 return d # avoid a copy
1747 return util.buffer(d, cachestart, cacheend - cachestart)
1756 return util.buffer(d, cachestart, cacheend - cachestart)
1748
1757
1749 return self._readsegment(offset, length, df=df)
1758 return self._readsegment(offset, length, df=df)
1750
1759
1751 def _getsegmentforrevs(self, startrev, endrev, df=None):
1760 def _getsegmentforrevs(self, startrev, endrev, df=None):
1752 """Obtain a segment of raw data corresponding to a range of revisions.
1761 """Obtain a segment of raw data corresponding to a range of revisions.
1753
1762
1754 Accepts the start and end revisions and an optional already-open
1763 Accepts the start and end revisions and an optional already-open
1755 file handle to be used for reading. If the file handle is read, its
1764 file handle to be used for reading. If the file handle is read, its
1756 seek position will not be preserved.
1765 seek position will not be preserved.
1757
1766
1758 Requests for data may be satisfied by a cache.
1767 Requests for data may be satisfied by a cache.
1759
1768
1760 Returns a 2-tuple of (offset, data) for the requested range of
1769 Returns a 2-tuple of (offset, data) for the requested range of
1761 revisions. Offset is the integer offset from the beginning of the
1770 revisions. Offset is the integer offset from the beginning of the
1762 revlog and data is a str or buffer of the raw byte data.
1771 revlog and data is a str or buffer of the raw byte data.
1763
1772
1764 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1773 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1765 to determine where each revision's data begins and ends.
1774 to determine where each revision's data begins and ends.
1766 """
1775 """
1767 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1776 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1768 # (functions are expensive).
1777 # (functions are expensive).
1769 index = self.index
1778 index = self.index
1770 istart = index[startrev]
1779 istart = index[startrev]
1771 start = int(istart[0] >> 16)
1780 start = int(istart[0] >> 16)
1772 if startrev == endrev:
1781 if startrev == endrev:
1773 end = start + istart[1]
1782 end = start + istart[1]
1774 else:
1783 else:
1775 iend = index[endrev]
1784 iend = index[endrev]
1776 end = int(iend[0] >> 16) + iend[1]
1785 end = int(iend[0] >> 16) + iend[1]
1777
1786
1778 if self._inline:
1787 if self._inline:
1779 start += (startrev + 1) * self.index.entry_size
1788 start += (startrev + 1) * self.index.entry_size
1780 end += (endrev + 1) * self.index.entry_size
1789 end += (endrev + 1) * self.index.entry_size
1781 length = end - start
1790 length = end - start
1782
1791
1783 return start, self._getsegment(start, length, df=df)
1792 return start, self._getsegment(start, length, df=df)
1784
1793
1785 def _chunk(self, rev, df=None):
1794 def _chunk(self, rev, df=None):
1786 """Obtain a single decompressed chunk for a revision.
1795 """Obtain a single decompressed chunk for a revision.
1787
1796
1788 Accepts an integer revision and an optional already-open file handle
1797 Accepts an integer revision and an optional already-open file handle
1789 to be used for reading. If used, the seek position of the file will not
1798 to be used for reading. If used, the seek position of the file will not
1790 be preserved.
1799 be preserved.
1791
1800
1792 Returns a str holding uncompressed data for the requested revision.
1801 Returns a str holding uncompressed data for the requested revision.
1793 """
1802 """
1794 compression_mode = self.index[rev][10]
1803 compression_mode = self.index[rev][10]
1795 data = self._getsegmentforrevs(rev, rev, df=df)[1]
1804 data = self._getsegmentforrevs(rev, rev, df=df)[1]
1796 if compression_mode == COMP_MODE_PLAIN:
1805 if compression_mode == COMP_MODE_PLAIN:
1797 return data
1806 return data
1798 elif compression_mode == COMP_MODE_DEFAULT:
1807 elif compression_mode == COMP_MODE_DEFAULT:
1799 return self._decompressor(data)
1808 return self._decompressor(data)
1800 elif compression_mode == COMP_MODE_INLINE:
1809 elif compression_mode == COMP_MODE_INLINE:
1801 return self.decompress(data)
1810 return self.decompress(data)
1802 else:
1811 else:
1803 msg = 'unknown compression mode %d'
1812 msg = 'unknown compression mode %d'
1804 msg %= compression_mode
1813 msg %= compression_mode
1805 raise error.RevlogError(msg)
1814 raise error.RevlogError(msg)
1806
1815
1807 def _chunks(self, revs, df=None, targetsize=None):
1816 def _chunks(self, revs, df=None, targetsize=None):
1808 """Obtain decompressed chunks for the specified revisions.
1817 """Obtain decompressed chunks for the specified revisions.
1809
1818
1810 Accepts an iterable of numeric revisions that are assumed to be in
1819 Accepts an iterable of numeric revisions that are assumed to be in
1811 ascending order. Also accepts an optional already-open file handle
1820 ascending order. Also accepts an optional already-open file handle
1812 to be used for reading. If used, the seek position of the file will
1821 to be used for reading. If used, the seek position of the file will
1813 not be preserved.
1822 not be preserved.
1814
1823
1815 This function is similar to calling ``self._chunk()`` multiple times,
1824 This function is similar to calling ``self._chunk()`` multiple times,
1816 but is faster.
1825 but is faster.
1817
1826
1818 Returns a list with decompressed data for each requested revision.
1827 Returns a list with decompressed data for each requested revision.
1819 """
1828 """
1820 if not revs:
1829 if not revs:
1821 return []
1830 return []
1822 start = self.start
1831 start = self.start
1823 length = self.length
1832 length = self.length
1824 inline = self._inline
1833 inline = self._inline
1825 iosize = self.index.entry_size
1834 iosize = self.index.entry_size
1826 buffer = util.buffer
1835 buffer = util.buffer
1827
1836
1828 l = []
1837 l = []
1829 ladd = l.append
1838 ladd = l.append
1830
1839
1831 if not self._withsparseread:
1840 if not self._withsparseread:
1832 slicedchunks = (revs,)
1841 slicedchunks = (revs,)
1833 else:
1842 else:
1834 slicedchunks = deltautil.slicechunk(
1843 slicedchunks = deltautil.slicechunk(
1835 self, revs, targetsize=targetsize
1844 self, revs, targetsize=targetsize
1836 )
1845 )
1837
1846
1838 for revschunk in slicedchunks:
1847 for revschunk in slicedchunks:
1839 firstrev = revschunk[0]
1848 firstrev = revschunk[0]
1840 # Skip trailing revisions with empty diff
1849 # Skip trailing revisions with empty diff
1841 for lastrev in revschunk[::-1]:
1850 for lastrev in revschunk[::-1]:
1842 if length(lastrev) != 0:
1851 if length(lastrev) != 0:
1843 break
1852 break
1844
1853
1845 try:
1854 try:
1846 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1855 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1847 except OverflowError:
1856 except OverflowError:
1848 # issue4215 - we can't cache a run of chunks greater than
1857 # issue4215 - we can't cache a run of chunks greater than
1849 # 2G on Windows
1858 # 2G on Windows
1850 return [self._chunk(rev, df=df) for rev in revschunk]
1859 return [self._chunk(rev, df=df) for rev in revschunk]
1851
1860
1852 decomp = self.decompress
1861 decomp = self.decompress
1853 # self._decompressor might be None, but will not be used in that case
1862 # self._decompressor might be None, but will not be used in that case
1854 def_decomp = self._decompressor
1863 def_decomp = self._decompressor
1855 for rev in revschunk:
1864 for rev in revschunk:
1856 chunkstart = start(rev)
1865 chunkstart = start(rev)
1857 if inline:
1866 if inline:
1858 chunkstart += (rev + 1) * iosize
1867 chunkstart += (rev + 1) * iosize
1859 chunklength = length(rev)
1868 chunklength = length(rev)
1860 comp_mode = self.index[rev][10]
1869 comp_mode = self.index[rev][10]
1861 c = buffer(data, chunkstart - offset, chunklength)
1870 c = buffer(data, chunkstart - offset, chunklength)
1862 if comp_mode == COMP_MODE_PLAIN:
1871 if comp_mode == COMP_MODE_PLAIN:
1863 ladd(c)
1872 ladd(c)
1864 elif comp_mode == COMP_MODE_INLINE:
1873 elif comp_mode == COMP_MODE_INLINE:
1865 ladd(decomp(c))
1874 ladd(decomp(c))
1866 elif comp_mode == COMP_MODE_DEFAULT:
1875 elif comp_mode == COMP_MODE_DEFAULT:
1867 ladd(def_decomp(c))
1876 ladd(def_decomp(c))
1868 else:
1877 else:
1869 msg = 'unknown compression mode %d'
1878 msg = 'unknown compression mode %d'
1870 msg %= comp_mode
1879 msg %= comp_mode
1871 raise error.RevlogError(msg)
1880 raise error.RevlogError(msg)
1872
1881
1873 return l
1882 return l
1874
1883
1875 def _chunkclear(self):
1884 def _chunkclear(self):
1876 """Clear the raw chunk cache."""
1885 """Clear the raw chunk cache."""
1877 self._chunkcache = (0, b'')
1886 self._chunkcache = (0, b'')
1878
1887
1879 def deltaparent(self, rev):
1888 def deltaparent(self, rev):
1880 """return deltaparent of the given revision"""
1889 """return deltaparent of the given revision"""
1881 base = self.index[rev][3]
1890 base = self.index[rev][3]
1882 if base == rev:
1891 if base == rev:
1883 return nullrev
1892 return nullrev
1884 elif self._generaldelta:
1893 elif self._generaldelta:
1885 return base
1894 return base
1886 else:
1895 else:
1887 return rev - 1
1896 return rev - 1
1888
1897
1889 def issnapshot(self, rev):
1898 def issnapshot(self, rev):
1890 """tells whether rev is a snapshot"""
1899 """tells whether rev is a snapshot"""
1891 if not self._sparserevlog:
1900 if not self._sparserevlog:
1892 return self.deltaparent(rev) == nullrev
1901 return self.deltaparent(rev) == nullrev
1893 elif util.safehasattr(self.index, b'issnapshot'):
1902 elif util.safehasattr(self.index, b'issnapshot'):
1894 # directly assign the method to cache the testing and access
1903 # directly assign the method to cache the testing and access
1895 self.issnapshot = self.index.issnapshot
1904 self.issnapshot = self.index.issnapshot
1896 return self.issnapshot(rev)
1905 return self.issnapshot(rev)
1897 if rev == nullrev:
1906 if rev == nullrev:
1898 return True
1907 return True
1899 entry = self.index[rev]
1908 entry = self.index[rev]
1900 base = entry[3]
1909 base = entry[3]
1901 if base == rev:
1910 if base == rev:
1902 return True
1911 return True
1903 if base == nullrev:
1912 if base == nullrev:
1904 return True
1913 return True
1905 p1 = entry[5]
1914 p1 = entry[5]
1906 p2 = entry[6]
1915 p2 = entry[6]
1907 if base == p1 or base == p2:
1916 if base == p1 or base == p2:
1908 return False
1917 return False
1909 return self.issnapshot(base)
1918 return self.issnapshot(base)
1910
1919
1911 def snapshotdepth(self, rev):
1920 def snapshotdepth(self, rev):
1912 """number of snapshot in the chain before this one"""
1921 """number of snapshot in the chain before this one"""
1913 if not self.issnapshot(rev):
1922 if not self.issnapshot(rev):
1914 raise error.ProgrammingError(b'revision %d not a snapshot')
1923 raise error.ProgrammingError(b'revision %d not a snapshot')
1915 return len(self._deltachain(rev)[0]) - 1
1924 return len(self._deltachain(rev)[0]) - 1
1916
1925
1917 def revdiff(self, rev1, rev2):
1926 def revdiff(self, rev1, rev2):
1918 """return or calculate a delta between two revisions
1927 """return or calculate a delta between two revisions
1919
1928
1920 The delta calculated is in binary form and is intended to be written to
1929 The delta calculated is in binary form and is intended to be written to
1921 revlog data directly. So this function needs raw revision data.
1930 revlog data directly. So this function needs raw revision data.
1922 """
1931 """
1923 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1932 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1924 return bytes(self._chunk(rev2))
1933 return bytes(self._chunk(rev2))
1925
1934
1926 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1935 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1927
1936
1928 def _processflags(self, text, flags, operation, raw=False):
1937 def _processflags(self, text, flags, operation, raw=False):
1929 """deprecated entry point to access flag processors"""
1938 """deprecated entry point to access flag processors"""
1930 msg = b'_processflag(...) use the specialized variant'
1939 msg = b'_processflag(...) use the specialized variant'
1931 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1940 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1932 if raw:
1941 if raw:
1933 return text, flagutil.processflagsraw(self, text, flags)
1942 return text, flagutil.processflagsraw(self, text, flags)
1934 elif operation == b'read':
1943 elif operation == b'read':
1935 return flagutil.processflagsread(self, text, flags)
1944 return flagutil.processflagsread(self, text, flags)
1936 else: # write operation
1945 else: # write operation
1937 return flagutil.processflagswrite(self, text, flags)
1946 return flagutil.processflagswrite(self, text, flags)
1938
1947
1939 def revision(self, nodeorrev, _df=None, raw=False):
1948 def revision(self, nodeorrev, _df=None, raw=False):
1940 """return an uncompressed revision of a given node or revision
1949 """return an uncompressed revision of a given node or revision
1941 number.
1950 number.
1942
1951
1943 _df - an existing file handle to read from. (internal-only)
1952 _df - an existing file handle to read from. (internal-only)
1944 raw - an optional argument specifying if the revision data is to be
1953 raw - an optional argument specifying if the revision data is to be
1945 treated as raw data when applying flag transforms. 'raw' should be set
1954 treated as raw data when applying flag transforms. 'raw' should be set
1946 to True when generating changegroups or in debug commands.
1955 to True when generating changegroups or in debug commands.
1947 """
1956 """
1948 if raw:
1957 if raw:
1949 msg = (
1958 msg = (
1950 b'revlog.revision(..., raw=True) is deprecated, '
1959 b'revlog.revision(..., raw=True) is deprecated, '
1951 b'use revlog.rawdata(...)'
1960 b'use revlog.rawdata(...)'
1952 )
1961 )
1953 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1962 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1954 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1963 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1955
1964
1956 def sidedata(self, nodeorrev, _df=None):
1965 def sidedata(self, nodeorrev, _df=None):
1957 """a map of extra data related to the changeset but not part of the hash
1966 """a map of extra data related to the changeset but not part of the hash
1958
1967
1959 This function currently return a dictionary. However, more advanced
1968 This function currently return a dictionary. However, more advanced
1960 mapping object will likely be used in the future for a more
1969 mapping object will likely be used in the future for a more
1961 efficient/lazy code.
1970 efficient/lazy code.
1962 """
1971 """
1963 return self._revisiondata(nodeorrev, _df)[1]
1972 return self._revisiondata(nodeorrev, _df)[1]
1964
1973
1965 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1974 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1966 # deal with <nodeorrev> argument type
1975 # deal with <nodeorrev> argument type
1967 if isinstance(nodeorrev, int):
1976 if isinstance(nodeorrev, int):
1968 rev = nodeorrev
1977 rev = nodeorrev
1969 node = self.node(rev)
1978 node = self.node(rev)
1970 else:
1979 else:
1971 node = nodeorrev
1980 node = nodeorrev
1972 rev = None
1981 rev = None
1973
1982
1974 # fast path the special `nullid` rev
1983 # fast path the special `nullid` rev
1975 if node == self.nullid:
1984 if node == self.nullid:
1976 return b"", {}
1985 return b"", {}
1977
1986
1978 # ``rawtext`` is the text as stored inside the revlog. Might be the
1987 # ``rawtext`` is the text as stored inside the revlog. Might be the
1979 # revision or might need to be processed to retrieve the revision.
1988 # revision or might need to be processed to retrieve the revision.
1980 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1989 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1981
1990
1982 if self.hassidedata:
1991 if self.hassidedata:
1983 if rev is None:
1992 if rev is None:
1984 rev = self.rev(node)
1993 rev = self.rev(node)
1985 sidedata = self._sidedata(rev)
1994 sidedata = self._sidedata(rev)
1986 else:
1995 else:
1987 sidedata = {}
1996 sidedata = {}
1988
1997
1989 if raw and validated:
1998 if raw and validated:
1990 # if we don't want to process the raw text and that raw
1999 # if we don't want to process the raw text and that raw
1991 # text is cached, we can exit early.
2000 # text is cached, we can exit early.
1992 return rawtext, sidedata
2001 return rawtext, sidedata
1993 if rev is None:
2002 if rev is None:
1994 rev = self.rev(node)
2003 rev = self.rev(node)
1995 # the revlog's flag for this revision
2004 # the revlog's flag for this revision
1996 # (usually alter its state or content)
2005 # (usually alter its state or content)
1997 flags = self.flags(rev)
2006 flags = self.flags(rev)
1998
2007
1999 if validated and flags == REVIDX_DEFAULT_FLAGS:
2008 if validated and flags == REVIDX_DEFAULT_FLAGS:
2000 # no extra flags set, no flag processor runs, text = rawtext
2009 # no extra flags set, no flag processor runs, text = rawtext
2001 return rawtext, sidedata
2010 return rawtext, sidedata
2002
2011
2003 if raw:
2012 if raw:
2004 validatehash = flagutil.processflagsraw(self, rawtext, flags)
2013 validatehash = flagutil.processflagsraw(self, rawtext, flags)
2005 text = rawtext
2014 text = rawtext
2006 else:
2015 else:
2007 r = flagutil.processflagsread(self, rawtext, flags)
2016 r = flagutil.processflagsread(self, rawtext, flags)
2008 text, validatehash = r
2017 text, validatehash = r
2009 if validatehash:
2018 if validatehash:
2010 self.checkhash(text, node, rev=rev)
2019 self.checkhash(text, node, rev=rev)
2011 if not validated:
2020 if not validated:
2012 self._revisioncache = (node, rev, rawtext)
2021 self._revisioncache = (node, rev, rawtext)
2013
2022
2014 return text, sidedata
2023 return text, sidedata
2015
2024
    def _rawtext(self, node, rev, _df=None):
        """return the possibly unvalidated rawtext for a revision

        returns (rev, rawtext, validated)

        ``validated`` is False when the text was rebuilt from the delta
        chain and has not yet been checked against the node hash; callers
        are expected to run checkhash() and refresh ``_revisioncache``.
        """

        # revision in the cache (could be useful to apply delta)
        cachedrev = None
        # An intermediate text to apply deltas to
        basetext = None

        # Check if we have the entry in cache
        # The cache entry looks like (node, rev, rawtext)
        if self._revisioncache:
            if self._revisioncache[0] == node:
                # exact cache hit: already validated text
                return (rev, self._revisioncache[2], True)
            cachedrev = self._revisioncache[1]

        if rev is None:
            rev = self.rev(node)

        # Walk the delta chain, stopping early if we reach the cached
        # revision (its text can then serve as the delta base).
        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
        if stopped:
            basetext = self._revisioncache[2]

        # drop cache to save memory, the caller is expected to
        # update self._revisioncache after validating the text
        self._revisioncache = None

        # targetsize is a read-size hint passed down to the chunk reader;
        # 4x the stored raw size appears to be a heuristic bound — a
        # negative rawsize presumably means "unknown" (TODO confirm).
        targetsize = None
        rawsize = self.index[rev][2]
        if 0 <= rawsize:
            targetsize = 4 * rawsize

        bins = self._chunks(chain, df=_df, targetsize=targetsize)
        if basetext is None:
            # no cached base: the first chunk of the chain is the full text
            basetext = bytes(bins[0])
            bins = bins[1:]

        rawtext = mdiff.patches(basetext, bins)
        del basetext # let us have a chance to free memory early
        return (rev, rawtext, False)
2058
2067
2059 def _sidedata(self, rev):
2068 def _sidedata(self, rev):
2060 """Return the sidedata for a given revision number."""
2069 """Return the sidedata for a given revision number."""
2061 index_entry = self.index[rev]
2070 index_entry = self.index[rev]
2062 sidedata_offset = index_entry[8]
2071 sidedata_offset = index_entry[8]
2063 sidedata_size = index_entry[9]
2072 sidedata_size = index_entry[9]
2064
2073
2065 if self._inline:
2074 if self._inline:
2066 sidedata_offset += self.index.entry_size * (1 + rev)
2075 sidedata_offset += self.index.entry_size * (1 + rev)
2067 if sidedata_size == 0:
2076 if sidedata_size == 0:
2068 return {}
2077 return {}
2069
2078
2070 comp_segment = self._getsegment(sidedata_offset, sidedata_size)
2079 comp_segment = self._getsegment(sidedata_offset, sidedata_size)
2071 comp = self.index[rev][11]
2080 comp = self.index[rev][11]
2072 if comp == COMP_MODE_PLAIN:
2081 if comp == COMP_MODE_PLAIN:
2073 segment = comp_segment
2082 segment = comp_segment
2074 elif comp == COMP_MODE_DEFAULT:
2083 elif comp == COMP_MODE_DEFAULT:
2075 segment = self._decompressor(comp_segment)
2084 segment = self._decompressor(comp_segment)
2076 elif comp == COMP_MODE_INLINE:
2085 elif comp == COMP_MODE_INLINE:
2077 segment = self.decompress(comp_segment)
2086 segment = self.decompress(comp_segment)
2078 else:
2087 else:
2079 msg = 'unknown compression mode %d'
2088 msg = 'unknown compression mode %d'
2080 msg %= comp
2089 msg %= comp
2081 raise error.RevlogError(msg)
2090 raise error.RevlogError(msg)
2082
2091
2083 sidedata = sidedatautil.deserialize_sidedata(segment)
2092 sidedata = sidedatautil.deserialize_sidedata(segment)
2084 return sidedata
2093 return sidedata
2085
2094
2086 def rawdata(self, nodeorrev, _df=None):
2095 def rawdata(self, nodeorrev, _df=None):
2087 """return an uncompressed raw data of a given node or revision number.
2096 """return an uncompressed raw data of a given node or revision number.
2088
2097
2089 _df - an existing file handle to read from. (internal-only)
2098 _df - an existing file handle to read from. (internal-only)
2090 """
2099 """
2091 return self._revisiondata(nodeorrev, _df, raw=True)[0]
2100 return self._revisiondata(nodeorrev, _df, raw=True)[0]
2092
2101
    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        # Delegates to the shared storage helper — presumably a SHA-1
        # digest over the parents and the revision text (see
        # storageutil.hashrevisionsha1).
        return storageutil.hashrevisionsha1(text, p1, p2)
2100
2109
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.

        Raises RevlogError on a hash mismatch, or CensoredNodeError when
        the mismatching text is recognized as censored content.
        """
        try:
            if p1 is None and p2 is None:
                # parents not supplied by the caller; look them up
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                # Clear the revision cache on hash failure. The revision cache
                # only stores the raw revision and clearing the cache does have
                # the side-effect that we won't have a cache hit when the raw
                # revision data is accessed. But this case should be rare and
                # it is extra work to teach the cache about the hash
                # verification state.
                if self._revisioncache and self._revisioncache[0] == node:
                    self._revisioncache = None

                # prefer the revision number in the message; fall back to
                # the short node id when no rev was provided
                revornode = rev
                if revornode is None:
                    revornode = templatefilters.short(hex(node))
                raise error.RevlogError(
                    _(b"integrity check failed on %s:%s")
                    % (self.display_id, pycompat.bytestr(revornode))
                )
        except error.RevlogError:
            # A censored revision never matches its node hash; report it
            # with a dedicated error so callers can distinguish censorship
            # from actual corruption.
            if self._censorable and storageutil.iscensoredtext(text):
                raise error.CensoredNodeError(self.display_id, node, text)
            raise
2131
2140
    def _enforceinlinesize(self, tr):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        total_size = self.start(tiprev) + self.length(tiprev)
        if not self._inline or total_size < _maxinline:
            # nothing to do: already split, or still small enough
            return

        troffset = tr.findoffset(self._indexfile)
        if troffset is None:
            raise error.RevlogError(
                _(b"%s not found in the transaction") % self._indexfile
            )
        # trindex: first revision whose data starts at/after the
        # transaction offset — used below to truncate the rewritten index
        # on rollback (NOTE(review): verify against tr.replace semantics)
        trindex = 0
        tr.add(self._datafile, 0)

        existing_handles = False
        if self._writinghandles is not None:
            existing_handles = True
            fp = self._writinghandles[0]
            fp.flush()
            fp.close()
            # We can't use the cached file handle after close(). So prevent
            # its usage.
            self._writinghandles = None

        new_dfh = self._datafp(b'w+')
        new_dfh.truncate(0) # drop any potentially existing data
        try:
            # copy every revision's data chunk out of the inline index
            # into the new standalone data file
            with self._indexfp() as read_ifh:
                for r in self:
                    new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
                    if troffset <= self.start(r):
                        trindex = r
                new_dfh.flush()

            # rewrite the index without the inline flag/data
            with self.__index_new_fp() as fp:
                self._format_flags &= ~FLAG_INLINE_DATA
                self._inline = False
                for i in self:
                    e = self.index.entry_binary(i)
                    if i == 0 and self._docket is None:
                        # docket-less revlogs embed the format header in
                        # the first index entry
                        header = self._format_flags | self._format_version
                        header = self.index.pack_header(header)
                        e = header + e
                    fp.write(e)
                if self._docket is not None:
                    self._docket.index_end = fp.tell()
                # the temp file replace the real index when we exit the context
                # manager

            tr.replace(self._indexfile, trindex * self.index.entry_size)
            nodemaputil.setup_persistent_nodemap(tr, self)
            # cached chunks refer to the old inline layout; drop them
            self._chunkclear()

            if existing_handles:
                # switched from inline to conventional reopen the index
                ifh = self.__index_write_fp()
                self._writinghandles = (ifh, new_dfh)
                # ownership transferred to _writinghandles; do not close
                new_dfh = None
        finally:
            if new_dfh is not None:
                new_dfh.close()
2199
2208
2200 def _nodeduplicatecallback(self, transaction, node):
2209 def _nodeduplicatecallback(self, transaction, node):
2201 """called when trying to add a node already stored."""
2210 """called when trying to add a node already stored."""
2202
2211
    @contextlib.contextmanager
    def _writing(self, transaction):
        """Context manager opening the revlog files for writing.

        Sets ``self._writinghandles`` to an ``(index_fh, data_fh)`` pair
        for the duration of the block (``data_fh`` is None for inline
        revlogs), registers the files with the transaction, and always
        closes/clears the handles on exit. Re-entrant: if handles are
        already open, the existing ones are reused.
        """
        if self._trypending:
            # a 'trypending' revlog reads uncommitted data; writing
            # through it would be a caller bug
            msg = b'try to write in a `trypending` revlog: %s'
            msg %= self.display_id
            raise error.ProgrammingError(msg)
        if self._writinghandles is not None:
            # nested use: handles already set up by an outer _writing
            yield
        else:
            r = len(self)
            dsize = 0
            if r:
                dsize = self.end(r - 1)
            dfh = None
            if not self._inline:
                try:
                    dfh = self._datafp(b"r+")
                    if self._docket is None:
                        # append at the physical end of the data file
                        dfh.seek(0, os.SEEK_END)
                    else:
                        # the docket tracks the logical end of data
                        dfh.seek(self._docket.data_end, os.SEEK_SET)
                except IOError as inst:
                    if inst.errno != errno.ENOENT:
                        raise
                    # data file does not exist yet: create it
                    dfh = self._datafp(b"w+")
                transaction.add(self._datafile, dsize)
            try:
                isize = r * self.index.entry_size
                ifh = self.__index_write_fp()
                if self._inline:
                    # inline: index file also carries the data payload
                    transaction.add(self._indexfile, dsize + isize)
                else:
                    transaction.add(self._indexfile, isize)
                try:
                    self._writinghandles = (ifh, dfh)
                    try:
                        yield
                        if self._docket is not None:
                            self._write_docket(transaction)
                    finally:
                        # never leave stale handles visible to later calls
                        self._writinghandles = None
                finally:
                    ifh.close()
            finally:
                if dfh is not None:
                    dfh.close()
2249
2258
    def _write_docket(self, transaction):
        """write the current docket on disk

        Exists as a separate method so the changelog subclass can hook in
        its own transaction logic.

        We could also imagine using the same transaction logic for all revlog
        since docket are cheap."""
        self._docket.write(transaction)
2258
2267
    def addrevision(
        self,
        text,
        transaction,
        link,
        p1,
        p2,
        cachedelta=None,
        node=None,
        flags=REVIDX_DEFAULT_FLAGS,
        deltacomputer=None,
        sidedata=None,
    ):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
            computed by default as hash(text, p1, p2), however subclasses might
            use different hashing method (and override checkhash() in such case)
        flags - the known flags to set on the revision
        deltacomputer - an optional deltacomputer instance shared between
            multiple calls
        sidedata - optional mapping of sidedata to store with the revision
            (only allowed when the revlog supports sidedata)

        Returns the revision number of the (possibly pre-existing) node.
        """
        if link == nullrev:
            raise error.RevlogError(
                _(b"attempted to add linkrev -1 to %s") % self.display_id
            )

        if sidedata is None:
            sidedata = {}
        elif sidedata and not self.hassidedata:
            raise error.ProgrammingError(
                _(b"trying to add sidedata to a revlog who don't support them")
            )

        if flags:
            # flag processors may need the node; compute it from the
            # unprocessed text up front
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = flagutil.processflagswrite(self, text, flags)

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise error.RevlogError(
                _(
                    b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
                )
                % (self.display_id, len(rawtext))
            )

        node = node or self.hash(rawtext, p1, p2)
        rev = self.index.get_rev(node)
        if rev is not None:
            # the node is already stored; adding it again is a no-op
            return rev

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(
            rawtext,
            transaction,
            link,
            p1,
            p2,
            node,
            flags,
            cachedelta=cachedelta,
            deltacomputer=deltacomputer,
            sidedata=sidedata,
        )
2336
2345
    def addrawrevision(
        self,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        deltacomputer=None,
        sidedata=None,
    ):
        """add a raw revision with known flags, node and parents
        useful when reusing a revision not stored in this revlog (ex: received
        over wire, or read from an external bundle).

        Unlike addrevision(), the text is taken as-is: no flag processing
        and no hash validation is performed here.
        """
        # open the files for writing for the duration of the insertion
        with self._writing(transaction):
            return self._addrevision(
                node,
                rawtext,
                transaction,
                link,
                p1,
                p2,
                flags,
                cachedelta,
                deltacomputer=deltacomputer,
                sidedata=sidedata,
            )
2367
2376
2368 def compress(self, data):
2377 def compress(self, data):
2369 """Generate a possibly-compressed representation of data."""
2378 """Generate a possibly-compressed representation of data."""
2370 if not data:
2379 if not data:
2371 return b'', data
2380 return b'', data
2372
2381
2373 compressed = self._compressor.compress(data)
2382 compressed = self._compressor.compress(data)
2374
2383
2375 if compressed:
2384 if compressed:
2376 # The revlog compressor added the header in the returned data.
2385 # The revlog compressor added the header in the returned data.
2377 return b'', compressed
2386 return b'', compressed
2378
2387
2379 if data[0:1] == b'\0':
2388 if data[0:1] == b'\0':
2380 return b'', data
2389 return b'', data
2381 return b'u', data
2390 return b'u', data
2382
2391
2383 def decompress(self, data):
2392 def decompress(self, data):
2384 """Decompress a revlog chunk.
2393 """Decompress a revlog chunk.
2385
2394
2386 The chunk is expected to begin with a header identifying the
2395 The chunk is expected to begin with a header identifying the
2387 format type so it can be routed to an appropriate decompressor.
2396 format type so it can be routed to an appropriate decompressor.
2388 """
2397 """
2389 if not data:
2398 if not data:
2390 return data
2399 return data
2391
2400
2392 # Revlogs are read much more frequently than they are written and many
2401 # Revlogs are read much more frequently than they are written and many
2393 # chunks only take microseconds to decompress, so performance is
2402 # chunks only take microseconds to decompress, so performance is
2394 # important here.
2403 # important here.
2395 #
2404 #
2396 # We can make a few assumptions about revlogs:
2405 # We can make a few assumptions about revlogs:
2397 #
2406 #
2398 # 1) the majority of chunks will be compressed (as opposed to inline
2407 # 1) the majority of chunks will be compressed (as opposed to inline
2399 # raw data).
2408 # raw data).
2400 # 2) decompressing *any* data will likely by at least 10x slower than
2409 # 2) decompressing *any* data will likely by at least 10x slower than
2401 # returning raw inline data.
2410 # returning raw inline data.
2402 # 3) we want to prioritize common and officially supported compression
2411 # 3) we want to prioritize common and officially supported compression
2403 # engines
2412 # engines
2404 #
2413 #
2405 # It follows that we want to optimize for "decompress compressed data
2414 # It follows that we want to optimize for "decompress compressed data
2406 # when encoded with common and officially supported compression engines"
2415 # when encoded with common and officially supported compression engines"
2407 # case over "raw data" and "data encoded by less common or non-official
2416 # case over "raw data" and "data encoded by less common or non-official
2408 # compression engines." That is why we have the inline lookup first
2417 # compression engines." That is why we have the inline lookup first
2409 # followed by the compengines lookup.
2418 # followed by the compengines lookup.
2410 #
2419 #
2411 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2420 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2412 # compressed chunks. And this matters for changelog and manifest reads.
2421 # compressed chunks. And this matters for changelog and manifest reads.
2413 t = data[0:1]
2422 t = data[0:1]
2414
2423
2415 if t == b'x':
2424 if t == b'x':
2416 try:
2425 try:
2417 return _zlibdecompress(data)
2426 return _zlibdecompress(data)
2418 except zlib.error as e:
2427 except zlib.error as e:
2419 raise error.RevlogError(
2428 raise error.RevlogError(
2420 _(b'revlog decompress error: %s')
2429 _(b'revlog decompress error: %s')
2421 % stringutil.forcebytestr(e)
2430 % stringutil.forcebytestr(e)
2422 )
2431 )
2423 # '\0' is more common than 'u' so it goes first.
2432 # '\0' is more common than 'u' so it goes first.
2424 elif t == b'\0':
2433 elif t == b'\0':
2425 return data
2434 return data
2426 elif t == b'u':
2435 elif t == b'u':
2427 return util.buffer(data, 1)
2436 return util.buffer(data, 1)
2428
2437
2429 compressor = self._get_decompressor(t)
2438 compressor = self._get_decompressor(t)
2430
2439
2431 return compressor.decompress(data)
2440 return compressor.decompress(data)
2432
2441
2433 def _addrevision(
2442 def _addrevision(
2434 self,
2443 self,
2435 node,
2444 node,
2436 rawtext,
2445 rawtext,
2437 transaction,
2446 transaction,
2438 link,
2447 link,
2439 p1,
2448 p1,
2440 p2,
2449 p2,
2441 flags,
2450 flags,
2442 cachedelta,
2451 cachedelta,
2443 alwayscache=False,
2452 alwayscache=False,
2444 deltacomputer=None,
2453 deltacomputer=None,
2445 sidedata=None,
2454 sidedata=None,
2446 ):
2455 ):
2447 """internal function to add revisions to the log
2456 """internal function to add revisions to the log
2448
2457
2449 see addrevision for argument descriptions.
2458 see addrevision for argument descriptions.
2450
2459
2451 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2460 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2452
2461
2453 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2462 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2454 be used.
2463 be used.
2455
2464
2456 invariants:
2465 invariants:
2457 - rawtext is optional (can be None); if not set, cachedelta must be set.
2466 - rawtext is optional (can be None); if not set, cachedelta must be set.
2458 if both are set, they must correspond to each other.
2467 if both are set, they must correspond to each other.
2459 """
2468 """
2460 if node == self.nullid:
2469 if node == self.nullid:
2461 raise error.RevlogError(
2470 raise error.RevlogError(
2462 _(b"%s: attempt to add null revision") % self.display_id
2471 _(b"%s: attempt to add null revision") % self.display_id
2463 )
2472 )
2464 if (
2473 if (
2465 node == self.nodeconstants.wdirid
2474 node == self.nodeconstants.wdirid
2466 or node in self.nodeconstants.wdirfilenodeids
2475 or node in self.nodeconstants.wdirfilenodeids
2467 ):
2476 ):
2468 raise error.RevlogError(
2477 raise error.RevlogError(
2469 _(b"%s: attempt to add wdir revision") % self.display_id
2478 _(b"%s: attempt to add wdir revision") % self.display_id
2470 )
2479 )
2471 if self._writinghandles is None:
2480 if self._writinghandles is None:
2472 msg = b'adding revision outside `revlog._writing` context'
2481 msg = b'adding revision outside `revlog._writing` context'
2473 raise error.ProgrammingError(msg)
2482 raise error.ProgrammingError(msg)
2474
2483
2475 if self._inline:
2484 if self._inline:
2476 fh = self._writinghandles[0]
2485 fh = self._writinghandles[0]
2477 else:
2486 else:
2478 fh = self._writinghandles[1]
2487 fh = self._writinghandles[1]
2479
2488
2480 btext = [rawtext]
2489 btext = [rawtext]
2481
2490
2482 curr = len(self)
2491 curr = len(self)
2483 prev = curr - 1
2492 prev = curr - 1
2484
2493
2485 offset = self._get_data_offset(prev)
2494 offset = self._get_data_offset(prev)
2486
2495
2487 if self._concurrencychecker:
2496 if self._concurrencychecker:
2488 ifh, dfh = self._writinghandles
2497 ifh, dfh = self._writinghandles
2489 if self._inline:
2498 if self._inline:
2490 # offset is "as if" it were in the .d file, so we need to add on
2499 # offset is "as if" it were in the .d file, so we need to add on
2491 # the size of the entry metadata.
2500 # the size of the entry metadata.
2492 self._concurrencychecker(
2501 self._concurrencychecker(
2493 ifh, self._indexfile, offset + curr * self.index.entry_size
2502 ifh, self._indexfile, offset + curr * self.index.entry_size
2494 )
2503 )
2495 else:
2504 else:
2496 # Entries in the .i are a consistent size.
2505 # Entries in the .i are a consistent size.
2497 self._concurrencychecker(
2506 self._concurrencychecker(
2498 ifh, self._indexfile, curr * self.index.entry_size
2507 ifh, self._indexfile, curr * self.index.entry_size
2499 )
2508 )
2500 self._concurrencychecker(dfh, self._datafile, offset)
2509 self._concurrencychecker(dfh, self._datafile, offset)
2501
2510
2502 p1r, p2r = self.rev(p1), self.rev(p2)
2511 p1r, p2r = self.rev(p1), self.rev(p2)
2503
2512
2504 # full versions are inserted when the needed deltas
2513 # full versions are inserted when the needed deltas
2505 # become comparable to the uncompressed text
2514 # become comparable to the uncompressed text
2506 if rawtext is None:
2515 if rawtext is None:
2507 # need rawtext size, before changed by flag processors, which is
2516 # need rawtext size, before changed by flag processors, which is
2508 # the non-raw size. use revlog explicitly to avoid filelog's extra
2517 # the non-raw size. use revlog explicitly to avoid filelog's extra
2509 # logic that might remove metadata size.
2518 # logic that might remove metadata size.
2510 textlen = mdiff.patchedsize(
2519 textlen = mdiff.patchedsize(
2511 revlog.size(self, cachedelta[0]), cachedelta[1]
2520 revlog.size(self, cachedelta[0]), cachedelta[1]
2512 )
2521 )
2513 else:
2522 else:
2514 textlen = len(rawtext)
2523 textlen = len(rawtext)
2515
2524
2516 if deltacomputer is None:
2525 if deltacomputer is None:
2517 deltacomputer = deltautil.deltacomputer(self)
2526 deltacomputer = deltautil.deltacomputer(self)
2518
2527
2519 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2528 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2520
2529
2521 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2530 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2522
2531
2523 compression_mode = COMP_MODE_INLINE
2532 compression_mode = COMP_MODE_INLINE
2524 if self._docket is not None:
2533 if self._docket is not None:
2525 h, d = deltainfo.data
2534 h, d = deltainfo.data
2526 if not h and not d:
2535 if not h and not d:
2527 # not data to store at all... declare them uncompressed
2536 # not data to store at all... declare them uncompressed
2528 compression_mode = COMP_MODE_PLAIN
2537 compression_mode = COMP_MODE_PLAIN
2529 elif not h:
2538 elif not h:
2530 t = d[0:1]
2539 t = d[0:1]
2531 if t == b'\0':
2540 if t == b'\0':
2532 compression_mode = COMP_MODE_PLAIN
2541 compression_mode = COMP_MODE_PLAIN
2533 elif t == self._docket.default_compression_header:
2542 elif t == self._docket.default_compression_header:
2534 compression_mode = COMP_MODE_DEFAULT
2543 compression_mode = COMP_MODE_DEFAULT
2535 elif h == b'u':
2544 elif h == b'u':
2536 # we have a more efficient way to declare uncompressed
2545 # we have a more efficient way to declare uncompressed
2537 h = b''
2546 h = b''
2538 compression_mode = COMP_MODE_PLAIN
2547 compression_mode = COMP_MODE_PLAIN
2539 deltainfo = deltautil.drop_u_compression(deltainfo)
2548 deltainfo = deltautil.drop_u_compression(deltainfo)
2540
2549
2541 sidedata_compression_mode = COMP_MODE_INLINE
2550 sidedata_compression_mode = COMP_MODE_INLINE
2542 if sidedata and self.hassidedata:
2551 if sidedata and self.hassidedata:
2543 sidedata_compression_mode = COMP_MODE_PLAIN
2552 sidedata_compression_mode = COMP_MODE_PLAIN
2544 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2553 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2545 sidedata_offset = offset + deltainfo.deltalen
2554 sidedata_offset = offset + deltainfo.deltalen
2546 h, comp_sidedata = self.compress(serialized_sidedata)
2555 h, comp_sidedata = self.compress(serialized_sidedata)
2547 if (
2556 if (
2548 h != b'u'
2557 h != b'u'
2549 and comp_sidedata[0:1] != b'\0'
2558 and comp_sidedata[0:1] != b'\0'
2550 and len(comp_sidedata) < len(serialized_sidedata)
2559 and len(comp_sidedata) < len(serialized_sidedata)
2551 ):
2560 ):
2552 assert not h
2561 assert not h
2553 if (
2562 if (
2554 comp_sidedata[0:1]
2563 comp_sidedata[0:1]
2555 == self._docket.default_compression_header
2564 == self._docket.default_compression_header
2556 ):
2565 ):
2557 sidedata_compression_mode = COMP_MODE_DEFAULT
2566 sidedata_compression_mode = COMP_MODE_DEFAULT
2558 serialized_sidedata = comp_sidedata
2567 serialized_sidedata = comp_sidedata
2559 else:
2568 else:
2560 sidedata_compression_mode = COMP_MODE_INLINE
2569 sidedata_compression_mode = COMP_MODE_INLINE
2561 serialized_sidedata = comp_sidedata
2570 serialized_sidedata = comp_sidedata
2562 else:
2571 else:
2563 serialized_sidedata = b""
2572 serialized_sidedata = b""
2564 # Don't store the offset if the sidedata is empty, that way
2573 # Don't store the offset if the sidedata is empty, that way
2565 # we can easily detect empty sidedata and they will be no different
2574 # we can easily detect empty sidedata and they will be no different
2566 # than ones we manually add.
2575 # than ones we manually add.
2567 sidedata_offset = 0
2576 sidedata_offset = 0
2568
2577
2569 e = (
2578 e = (
2570 offset_type(offset, flags),
2579 offset_type(offset, flags),
2571 deltainfo.deltalen,
2580 deltainfo.deltalen,
2572 textlen,
2581 textlen,
2573 deltainfo.base,
2582 deltainfo.base,
2574 link,
2583 link,
2575 p1r,
2584 p1r,
2576 p2r,
2585 p2r,
2577 node,
2586 node,
2578 sidedata_offset,
2587 sidedata_offset,
2579 len(serialized_sidedata),
2588 len(serialized_sidedata),
2580 compression_mode,
2589 compression_mode,
2581 sidedata_compression_mode,
2590 sidedata_compression_mode,
2582 )
2591 )
2583
2592
2584 self.index.append(e)
2593 self.index.append(e)
2585 entry = self.index.entry_binary(curr)
2594 entry = self.index.entry_binary(curr)
2586 if curr == 0 and self._docket is None:
2595 if curr == 0 and self._docket is None:
2587 header = self._format_flags | self._format_version
2596 header = self._format_flags | self._format_version
2588 header = self.index.pack_header(header)
2597 header = self.index.pack_header(header)
2589 entry = header + entry
2598 entry = header + entry
2590 self._writeentry(
2599 self._writeentry(
2591 transaction,
2600 transaction,
2592 entry,
2601 entry,
2593 deltainfo.data,
2602 deltainfo.data,
2594 link,
2603 link,
2595 offset,
2604 offset,
2596 serialized_sidedata,
2605 serialized_sidedata,
2597 )
2606 )
2598
2607
2599 rawtext = btext[0]
2608 rawtext = btext[0]
2600
2609
2601 if alwayscache and rawtext is None:
2610 if alwayscache and rawtext is None:
2602 rawtext = deltacomputer.buildtext(revinfo, fh)
2611 rawtext = deltacomputer.buildtext(revinfo, fh)
2603
2612
2604 if type(rawtext) == bytes: # only accept immutable objects
2613 if type(rawtext) == bytes: # only accept immutable objects
2605 self._revisioncache = (node, curr, rawtext)
2614 self._revisioncache = (node, curr, rawtext)
2606 self._chainbasecache[curr] = deltainfo.chainbase
2615 self._chainbasecache[curr] = deltainfo.chainbase
2607 return curr
2616 return curr
2608
2617
2609 def _get_data_offset(self, prev):
2618 def _get_data_offset(self, prev):
2610 """Returns the current offset in the (in-transaction) data file.
2619 """Returns the current offset in the (in-transaction) data file.
2611 Versions < 2 of the revlog can get this 0(1), revlog v2 needs a docket
2620 Versions < 2 of the revlog can get this 0(1), revlog v2 needs a docket
2612 file to store that information: since sidedata can be rewritten to the
2621 file to store that information: since sidedata can be rewritten to the
2613 end of the data file within a transaction, you can have cases where, for
2622 end of the data file within a transaction, you can have cases where, for
2614 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2623 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2615 to `n - 1`'s sidedata being written after `n`'s data.
2624 to `n - 1`'s sidedata being written after `n`'s data.
2616
2625
2617 TODO cache this in a docket file before getting out of experimental."""
2626 TODO cache this in a docket file before getting out of experimental."""
2618 if self._docket is None:
2627 if self._docket is None:
2619 return self.end(prev)
2628 return self.end(prev)
2620 else:
2629 else:
2621 return self._docket.data_end
2630 return self._docket.data_end
2622
2631
    def _writeentry(self, transaction, entry, data, link, offset, sidedata):
        """Write one revision to disk: index entry, delta data and sidedata.

        Must be called inside a ``revlog._writing`` context (enforced below).
        When a docket is in use, its index/data end offsets are refreshed
        after the writes so later appends land in the right place.
        """
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)
        ifh, dfh = self._writinghandles
        # Position both handles: at EOF for docket-less revlogs, or at the
        # docket-recorded in-transaction end offsets otherwise.
        if self._docket is None:
            ifh.seek(0, os.SEEK_END)
        else:
            ifh.seek(self._docket.index_end, os.SEEK_SET)
        if dfh:
            if self._docket is None:
                dfh.seek(0, os.SEEK_END)
            else:
                dfh.seek(self._docket.data_end, os.SEEK_SET)

        curr = len(self) - 1
        if not self._inline:
            # Separate index/data files: deltas and sidedata go to the data
            # file, the fixed-size entry to the index file.
            transaction.add(self._datafile, offset)
            transaction.add(self._indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            if sidedata:
                dfh.write(sidedata)
            ifh.write(entry)
        else:
            # Inline revlog: everything is interleaved in the index file.
            offset += curr * self.index.entry_size
            transaction.add(self._indexfile, offset)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            if sidedata:
                ifh.write(sidedata)
            # May split the inline file into index + data once too large.
            self._enforceinlinesize(transaction)
        if self._docket is not None:
            # Record the new end offsets so the next write appends correctly.
            self._docket.index_end = self._writinghandles[0].tell()
            self._docket.data_end = self._writinghandles[1].tell()

        nodemaputil.setup_persistent_nodemap(transaction, self)
2674
2683
    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        alwayscache=False,
        addrevisioncb=None,
        duplicaterevisioncb=None,
    ):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.

        ``duplicaterevisioncb``, if defined, is called with this revlog and
        the existing revision number for incoming nodes already present.

        Returns True if at least one incoming node was processed (added or
        already known), False if the group was empty.
        """

        # addgroup() calls must not nest; guarded by the _adding_group flag.
        if self._adding_group:
            raise error.ProgrammingError(b'cannot nest addgroup() calls')

        self._adding_group = True
        empty = True
        try:
            with self._writing(transaction):
                deltacomputer = deltautil.deltacomputer(self)
                # loop through our set of deltas
                for data in deltas:
                    (
                        node,
                        p1,
                        p2,
                        linknode,
                        deltabase,
                        delta,
                        flags,
                        sidedata,
                    ) = data
                    link = linkmapper(linknode)
                    flags = flags or REVIDX_DEFAULT_FLAGS

                    rev = self.index.get_rev(node)
                    if rev is not None:
                        # this can happen if two branches make the same change
                        self._nodeduplicatecallback(transaction, rev)
                        if duplicaterevisioncb:
                            duplicaterevisioncb(self, rev)
                        empty = False
                        continue

                    # Both parents must already be known to this revlog.
                    for p in (p1, p2):
                        if not self.index.has_node(p):
                            raise error.LookupError(
                                p, self.radix, _(b'unknown parent')
                            )

                    if not self.index.has_node(deltabase):
                        raise error.LookupError(
                            deltabase, self.display_id, _(b'unknown delta base')
                        )

                    baserev = self.rev(deltabase)

                    if baserev != nullrev and self.iscensored(baserev):
                        # if base is censored, delta must be full replacement
                        # in a single patch operation
                        hlen = struct.calcsize(b">lll")
                        oldlen = self.rawsize(baserev)
                        newlen = len(delta) - hlen
                        if delta[:hlen] != mdiff.replacediffheader(
                            oldlen, newlen
                        ):
                            raise error.CensoredBaseError(
                                self.display_id, self.node(baserev)
                            )

                    if not flags and self._peek_iscensored(baserev, delta):
                        flags |= REVIDX_ISCENSORED

                    # We assume consumers of addrevisioncb will want to retrieve
                    # the added revision, which will require a call to
                    # revision(). revision() will fast path if there is a cache
                    # hit. So, we tell _addrevision() to always cache in this case.
                    # We're only using addgroup() in the context of changegroup
                    # generation so the revision data can always be handled as raw
                    # by the flagprocessor.
                    rev = self._addrevision(
                        node,
                        None,
                        transaction,
                        link,
                        p1,
                        p2,
                        flags,
                        (baserev, delta),
                        alwayscache=alwayscache,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

                    if addrevisioncb:
                        addrevisioncb(self, rev)
                    empty = False
        finally:
            # Always clear the re-entrancy flag, even on error.
            self._adding_group = False
        return not empty
2783
2792
2784 def iscensored(self, rev):
2793 def iscensored(self, rev):
2785 """Check if a file revision is censored."""
2794 """Check if a file revision is censored."""
2786 if not self._censorable:
2795 if not self._censorable:
2787 return False
2796 return False
2788
2797
2789 return self.flags(rev) & REVIDX_ISCENSORED
2798 return self.flags(rev) & REVIDX_ISCENSORED
2790
2799
2791 def _peek_iscensored(self, baserev, delta):
2800 def _peek_iscensored(self, baserev, delta):
2792 """Quickly check if a delta produces a censored revision."""
2801 """Quickly check if a delta produces a censored revision."""
2793 if not self._censorable:
2802 if not self._censorable:
2794 return False
2803 return False
2795
2804
2796 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2805 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2797
2806
2798 def getstrippoint(self, minlink):
2807 def getstrippoint(self, minlink):
2799 """find the minimum rev that must be stripped to strip the linkrev
2808 """find the minimum rev that must be stripped to strip the linkrev
2800
2809
2801 Returns a tuple containing the minimum rev and a set of all revs that
2810 Returns a tuple containing the minimum rev and a set of all revs that
2802 have linkrevs that will be broken by this strip.
2811 have linkrevs that will be broken by this strip.
2803 """
2812 """
2804 return storageutil.resolvestripinfo(
2813 return storageutil.resolvestripinfo(
2805 minlink,
2814 minlink,
2806 len(self) - 1,
2815 len(self) - 1,
2807 self.headrevs(),
2816 self.headrevs(),
2808 self.linkrev,
2817 self.linkrev,
2809 self.parentrevs,
2818 self.parentrevs,
2810 )
2819 )
2811
2820
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            # nothing to strip: every revision has linkrev < minlink
            return

        # first truncate the files on disk
        data_end = self.start(rev)
        if not self._inline:
            transaction.add(self._datafile, data_end)
            end = rev * self.index.entry_size
        else:
            # inline: index entries and data share one file
            end = data_end + (rev * self.index.entry_size)

        transaction.add(self._indexfile, end)
        if self._docket is not None:
            # XXX we could leverage the docket while stripping. However it is
            # not powerful enough at the time of this comment
            self._docket.index_end = end
            self._docket.data_end = data_end
            self._docket.write(transaction, stripping=True)

        # then reset internal state in memory to forget those revisions
        self._revisioncache = None
        self._chaininfocache = util.lrucachedict(500)
        self._chunkclear()

        # drop the stripped entries from the in-memory index
        del self.index[rev:-1]
2855
2864
    def checksize(self):
        """Check size of index and data files

        return a (dd, di) tuple.
        - dd: extra bytes for the "data" file
        - di: extra bytes for the "index" file

        A healthy revlog will return (0, 0).
        """
        # expected size of the data portion, from the last revision's end
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, io.SEEK_END)
                actual = f.tell()
            dd = actual - expected
        except IOError as inst:
            # a missing data file is fine (e.g. inline revlog)
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self._indexfile)
            f.seek(0, io.SEEK_END)
            actual = f.tell()
            f.close()
            s = self.index.entry_size
            # di: bytes beyond a whole number of index entries
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                # inline file holds entries + data; recompute both deltas
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)
2899
2908
2900 def files(self):
2909 def files(self):
2901 res = [self._indexfile]
2910 res = [self._indexfile]
2902 if not self._inline:
2911 if not self._inline:
2903 res.append(self._datafile)
2912 res.append(self._datafile)
2904 return res
2913 return res
2905
2914
2906 def emitrevisions(
2915 def emitrevisions(
2907 self,
2916 self,
2908 nodes,
2917 nodes,
2909 nodesorder=None,
2918 nodesorder=None,
2910 revisiondata=False,
2919 revisiondata=False,
2911 assumehaveparentrevisions=False,
2920 assumehaveparentrevisions=False,
2912 deltamode=repository.CG_DELTAMODE_STD,
2921 deltamode=repository.CG_DELTAMODE_STD,
2913 sidedata_helpers=None,
2922 sidedata_helpers=None,
2914 ):
2923 ):
2915 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2924 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2916 raise error.ProgrammingError(
2925 raise error.ProgrammingError(
2917 b'unhandled value for nodesorder: %s' % nodesorder
2926 b'unhandled value for nodesorder: %s' % nodesorder
2918 )
2927 )
2919
2928
2920 if nodesorder is None and not self._generaldelta:
2929 if nodesorder is None and not self._generaldelta:
2921 nodesorder = b'storage'
2930 nodesorder = b'storage'
2922
2931
2923 if (
2932 if (
2924 not self._storedeltachains
2933 not self._storedeltachains
2925 and deltamode != repository.CG_DELTAMODE_PREV
2934 and deltamode != repository.CG_DELTAMODE_PREV
2926 ):
2935 ):
2927 deltamode = repository.CG_DELTAMODE_FULL
2936 deltamode = repository.CG_DELTAMODE_FULL
2928
2937
2929 return storageutil.emitrevisions(
2938 return storageutil.emitrevisions(
2930 self,
2939 self,
2931 nodes,
2940 nodes,
2932 nodesorder,
2941 nodesorder,
2933 revlogrevisiondelta,
2942 revlogrevisiondelta,
2934 deltaparentfn=self.deltaparent,
2943 deltaparentfn=self.deltaparent,
2935 candeltafn=self.candelta,
2944 candeltafn=self.candelta,
2936 rawsizefn=self.rawsize,
2945 rawsizefn=self.rawsize,
2937 revdifffn=self.revdiff,
2946 revdifffn=self.revdiff,
2938 flagsfn=self.flags,
2947 flagsfn=self.flags,
2939 deltamode=deltamode,
2948 deltamode=deltamode,
2940 revisiondata=revisiondata,
2949 revisiondata=revisiondata,
2941 assumehaveparentrevisions=assumehaveparentrevisions,
2950 assumehaveparentrevisions=assumehaveparentrevisions,
2942 sidedata_helpers=sidedata_helpers,
2951 sidedata_helpers=sidedata_helpers,
2943 )
2952 )
2944
2953
2945 DELTAREUSEALWAYS = b'always'
2954 DELTAREUSEALWAYS = b'always'
2946 DELTAREUSESAMEREVS = b'samerevs'
2955 DELTAREUSESAMEREVS = b'samerevs'
2947 DELTAREUSENEVER = b'never'
2956 DELTAREUSENEVER = b'never'
2948
2957
2949 DELTAREUSEFULLADD = b'fulladd'
2958 DELTAREUSEFULLADD = b'fulladd'
2950
2959
2951 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2960 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2952
2961
    def clone(
        self,
        tr,
        destrevlog,
        addrevisioncb=None,
        deltareuse=DELTAREUSESAMEREVS,
        forcedeltabothparents=None,
        sidedata_helpers=None,
    ):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
          Deltas will always be reused (if possible), even if the destination
          revlog would not select the same revisions for the delta. This is the
          fastest mode of operation.
        DELTAREUSESAMEREVS
          Deltas will be reused if the destination revlog would pick the same
          revisions for the delta. This mode strikes a balance between speed
          and optimization.
        DELTAREUSENEVER
          Deltas will never be reused. This is the slowest mode of execution.
          This mode can be used to recompute deltas (e.g. if the diff/delta
          algorithm changes).
        DELTAREUSEFULLADD
          Revisions will be re-added as if they were new content. This is
          slower than DELTAREUSEALWAYS but allows more mechanisms to kick in.
          eg: large file detection and handling.

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``forcedeltabothparents``
        argument controls whether to force compute deltas against both parents
        for merges. By default, the current default is used.

        See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
        `sidedata_helpers`.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(
                _(b'value for deltareuse invalid: %s') % deltareuse
            )

        if len(destrevlog):
            raise ValueError(_(b'destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_(b'source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_(b'destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase controls whether to reuse a cached delta,
        # if possible.
        oldlazydelta = destrevlog._lazydelta
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            # Translate the reuse policy into the destination's laziness
            # knobs (DELTAREUSEFULLADD leaves them untouched).
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            self._clone(
                tr,
                destrevlog,
                addrevisioncb,
                deltareuse,
                forcedeltabothparents,
                sidedata_helpers,
            )

        finally:
            # Always restore the destination's original delta settings.
            destrevlog._lazydelta = oldlazydelta
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd
3051
3060
3052 def _clone(
3061 def _clone(
3053 self,
3062 self,
3054 tr,
3063 tr,
3055 destrevlog,
3064 destrevlog,
3056 addrevisioncb,
3065 addrevisioncb,
3057 deltareuse,
3066 deltareuse,
3058 forcedeltabothparents,
3067 forcedeltabothparents,
3059 sidedata_helpers,
3068 sidedata_helpers,
3060 ):
3069 ):
3061 """perform the core duty of `revlog.clone` after parameter processing"""
3070 """perform the core duty of `revlog.clone` after parameter processing"""
3062 deltacomputer = deltautil.deltacomputer(destrevlog)
3071 deltacomputer = deltautil.deltacomputer(destrevlog)
3063 index = self.index
3072 index = self.index
3064 for rev in self:
3073 for rev in self:
3065 entry = index[rev]
3074 entry = index[rev]
3066
3075
3067 # Some classes override linkrev to take filtered revs into
3076 # Some classes override linkrev to take filtered revs into
3068 # account. Use raw entry from index.
3077 # account. Use raw entry from index.
3069 flags = entry[0] & 0xFFFF
3078 flags = entry[0] & 0xFFFF
3070 linkrev = entry[4]
3079 linkrev = entry[4]
3071 p1 = index[entry[5]][7]
3080 p1 = index[entry[5]][7]
3072 p2 = index[entry[6]][7]
3081 p2 = index[entry[6]][7]
3073 node = entry[7]
3082 node = entry[7]
3074
3083
3075 # (Possibly) reuse the delta from the revlog if allowed and
3084 # (Possibly) reuse the delta from the revlog if allowed and
3076 # the revlog chunk is a delta.
3085 # the revlog chunk is a delta.
3077 cachedelta = None
3086 cachedelta = None
3078 rawtext = None
3087 rawtext = None
3079 if deltareuse == self.DELTAREUSEFULLADD:
3088 if deltareuse == self.DELTAREUSEFULLADD:
3080 text, sidedata = self._revisiondata(rev)
3089 text, sidedata = self._revisiondata(rev)
3081
3090
3082 if sidedata_helpers is not None:
3091 if sidedata_helpers is not None:
3083 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3092 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3084 self, sidedata_helpers, sidedata, rev
3093 self, sidedata_helpers, sidedata, rev
3085 )
3094 )
3086 flags = flags | new_flags[0] & ~new_flags[1]
3095 flags = flags | new_flags[0] & ~new_flags[1]
3087
3096
3088 destrevlog.addrevision(
3097 destrevlog.addrevision(
3089 text,
3098 text,
3090 tr,
3099 tr,
3091 linkrev,
3100 linkrev,
3092 p1,
3101 p1,
3093 p2,
3102 p2,
3094 cachedelta=cachedelta,
3103 cachedelta=cachedelta,
3095 node=node,
3104 node=node,
3096 flags=flags,
3105 flags=flags,
3097 deltacomputer=deltacomputer,
3106 deltacomputer=deltacomputer,
3098 sidedata=sidedata,
3107 sidedata=sidedata,
3099 )
3108 )
3100 else:
3109 else:
3101 if destrevlog._lazydelta:
3110 if destrevlog._lazydelta:
3102 dp = self.deltaparent(rev)
3111 dp = self.deltaparent(rev)
3103 if dp != nullrev:
3112 if dp != nullrev:
3104 cachedelta = (dp, bytes(self._chunk(rev)))
3113 cachedelta = (dp, bytes(self._chunk(rev)))
3105
3114
3106 sidedata = None
3115 sidedata = None
3107 if not cachedelta:
3116 if not cachedelta:
3108 rawtext, sidedata = self._revisiondata(rev)
3117 rawtext, sidedata = self._revisiondata(rev)
3109 if sidedata is None:
3118 if sidedata is None:
3110 sidedata = self.sidedata(rev)
3119 sidedata = self.sidedata(rev)
3111
3120
3112 if sidedata_helpers is not None:
3121 if sidedata_helpers is not None:
3113 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3122 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3114 self, sidedata_helpers, sidedata, rev
3123 self, sidedata_helpers, sidedata, rev
3115 )
3124 )
3116 flags = flags | new_flags[0] & ~new_flags[1]
3125 flags = flags | new_flags[0] & ~new_flags[1]
3117
3126
3118 with destrevlog._writing(tr):
3127 with destrevlog._writing(tr):
3119 destrevlog._addrevision(
3128 destrevlog._addrevision(
3120 node,
3129 node,
3121 rawtext,
3130 rawtext,
3122 tr,
3131 tr,
3123 linkrev,
3132 linkrev,
3124 p1,
3133 p1,
3125 p2,
3134 p2,
3126 flags,
3135 flags,
3127 cachedelta,
3136 cachedelta,
3128 deltacomputer=deltacomputer,
3137 deltacomputer=deltacomputer,
3129 sidedata=sidedata,
3138 sidedata=sidedata,
3130 )
3139 )
3131
3140
3132 if addrevisioncb:
3141 if addrevisioncb:
3133 addrevisioncb(self, rev, node)
3142 addrevisioncb(self, rev, node)
3134
3143
3135 def censorrevision(self, tr, censornode, tombstone=b''):
3144 def censorrevision(self, tr, censornode, tombstone=b''):
3136 if self._format_version == REVLOGV0:
3145 if self._format_version == REVLOGV0:
3137 raise error.RevlogError(
3146 raise error.RevlogError(
3138 _(b'cannot censor with version %d revlogs')
3147 _(b'cannot censor with version %d revlogs')
3139 % self._format_version
3148 % self._format_version
3140 )
3149 )
3141
3150
3142 censorrev = self.rev(censornode)
3151 censorrev = self.rev(censornode)
3143 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
3152 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
3144
3153
3145 if len(tombstone) > self.rawsize(censorrev):
3154 if len(tombstone) > self.rawsize(censorrev):
3146 raise error.Abort(
3155 raise error.Abort(
3147 _(b'censor tombstone must be no longer than censored data')
3156 _(b'censor tombstone must be no longer than censored data')
3148 )
3157 )
3149
3158
3150 # Rewriting the revlog in place is hard. Our strategy for censoring is
3159 # Rewriting the revlog in place is hard. Our strategy for censoring is
3151 # to create a new revlog, copy all revisions to it, then replace the
3160 # to create a new revlog, copy all revisions to it, then replace the
3152 # revlogs on transaction close.
3161 # revlogs on transaction close.
3153 #
3162 #
3154 # This is a bit dangerous. We could easily have a mismatch of state.
3163 # This is a bit dangerous. We could easily have a mismatch of state.
3155 newrl = revlog(
3164 newrl = revlog(
3156 self.opener,
3165 self.opener,
3157 target=self.target,
3166 target=self.target,
3158 radix=self.radix,
3167 radix=self.radix,
3159 postfix=b'tmpcensored',
3168 postfix=b'tmpcensored',
3160 censorable=True,
3169 censorable=True,
3161 )
3170 )
3162 newrl._format_version = self._format_version
3171 newrl._format_version = self._format_version
3163 newrl._format_flags = self._format_flags
3172 newrl._format_flags = self._format_flags
3164 newrl._generaldelta = self._generaldelta
3173 newrl._generaldelta = self._generaldelta
3165 newrl._parse_index = self._parse_index
3174 newrl._parse_index = self._parse_index
3166
3175
3167 for rev in self.revs():
3176 for rev in self.revs():
3168 node = self.node(rev)
3177 node = self.node(rev)
3169 p1, p2 = self.parents(node)
3178 p1, p2 = self.parents(node)
3170
3179
3171 if rev == censorrev:
3180 if rev == censorrev:
3172 newrl.addrawrevision(
3181 newrl.addrawrevision(
3173 tombstone,
3182 tombstone,
3174 tr,
3183 tr,
3175 self.linkrev(censorrev),
3184 self.linkrev(censorrev),
3176 p1,
3185 p1,
3177 p2,
3186 p2,
3178 censornode,
3187 censornode,
3179 REVIDX_ISCENSORED,
3188 REVIDX_ISCENSORED,
3180 )
3189 )
3181
3190
3182 if newrl.deltaparent(rev) != nullrev:
3191 if newrl.deltaparent(rev) != nullrev:
3183 raise error.Abort(
3192 raise error.Abort(
3184 _(
3193 _(
3185 b'censored revision stored as delta; '
3194 b'censored revision stored as delta; '
3186 b'cannot censor'
3195 b'cannot censor'
3187 ),
3196 ),
3188 hint=_(
3197 hint=_(
3189 b'censoring of revlogs is not '
3198 b'censoring of revlogs is not '
3190 b'fully implemented; please report '
3199 b'fully implemented; please report '
3191 b'this bug'
3200 b'this bug'
3192 ),
3201 ),
3193 )
3202 )
3194 continue
3203 continue
3195
3204
3196 if self.iscensored(rev):
3205 if self.iscensored(rev):
3197 if self.deltaparent(rev) != nullrev:
3206 if self.deltaparent(rev) != nullrev:
3198 raise error.Abort(
3207 raise error.Abort(
3199 _(
3208 _(
3200 b'cannot censor due to censored '
3209 b'cannot censor due to censored '
3201 b'revision having delta stored'
3210 b'revision having delta stored'
3202 )
3211 )
3203 )
3212 )
3204 rawtext = self._chunk(rev)
3213 rawtext = self._chunk(rev)
3205 else:
3214 else:
3206 rawtext = self.rawdata(rev)
3215 rawtext = self.rawdata(rev)
3207
3216
3208 newrl.addrawrevision(
3217 newrl.addrawrevision(
3209 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
3218 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
3210 )
3219 )
3211
3220
3212 tr.addbackup(self._indexfile, location=b'store')
3221 tr.addbackup(self._indexfile, location=b'store')
3213 if not self._inline:
3222 if not self._inline:
3214 tr.addbackup(self._datafile, location=b'store')
3223 tr.addbackup(self._datafile, location=b'store')
3215
3224
3216 self.opener.rename(newrl._indexfile, self._indexfile)
3225 self.opener.rename(newrl._indexfile, self._indexfile)
3217 if not self._inline:
3226 if not self._inline:
3218 self.opener.rename(newrl._datafile, self._datafile)
3227 self.opener.rename(newrl._datafile, self._datafile)
3219
3228
3220 self.clearcaches()
3229 self.clearcaches()
3221 self._loadindex()
3230 self._loadindex()
3222
3231
3223 def verifyintegrity(self, state):
3232 def verifyintegrity(self, state):
3224 """Verifies the integrity of the revlog.
3233 """Verifies the integrity of the revlog.
3225
3234
3226 Yields ``revlogproblem`` instances describing problems that are
3235 Yields ``revlogproblem`` instances describing problems that are
3227 found.
3236 found.
3228 """
3237 """
3229 dd, di = self.checksize()
3238 dd, di = self.checksize()
3230 if dd:
3239 if dd:
3231 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3240 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3232 if di:
3241 if di:
3233 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3242 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3234
3243
3235 version = self._format_version
3244 version = self._format_version
3236
3245
3237 # The verifier tells us what version revlog we should be.
3246 # The verifier tells us what version revlog we should be.
3238 if version != state[b'expectedversion']:
3247 if version != state[b'expectedversion']:
3239 yield revlogproblem(
3248 yield revlogproblem(
3240 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3249 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3241 % (self.display_id, version, state[b'expectedversion'])
3250 % (self.display_id, version, state[b'expectedversion'])
3242 )
3251 )
3243
3252
3244 state[b'skipread'] = set()
3253 state[b'skipread'] = set()
3245 state[b'safe_renamed'] = set()
3254 state[b'safe_renamed'] = set()
3246
3255
3247 for rev in self:
3256 for rev in self:
3248 node = self.node(rev)
3257 node = self.node(rev)
3249
3258
3250 # Verify contents. 4 cases to care about:
3259 # Verify contents. 4 cases to care about:
3251 #
3260 #
3252 # common: the most common case
3261 # common: the most common case
3253 # rename: with a rename
3262 # rename: with a rename
3254 # meta: file content starts with b'\1\n', the metadata
3263 # meta: file content starts with b'\1\n', the metadata
3255 # header defined in filelog.py, but without a rename
3264 # header defined in filelog.py, but without a rename
3256 # ext: content stored externally
3265 # ext: content stored externally
3257 #
3266 #
3258 # More formally, their differences are shown below:
3267 # More formally, their differences are shown below:
3259 #
3268 #
3260 # | common | rename | meta | ext
3269 # | common | rename | meta | ext
3261 # -------------------------------------------------------
3270 # -------------------------------------------------------
3262 # flags() | 0 | 0 | 0 | not 0
3271 # flags() | 0 | 0 | 0 | not 0
3263 # renamed() | False | True | False | ?
3272 # renamed() | False | True | False | ?
3264 # rawtext[0:2]=='\1\n'| False | True | True | ?
3273 # rawtext[0:2]=='\1\n'| False | True | True | ?
3265 #
3274 #
3266 # "rawtext" means the raw text stored in revlog data, which
3275 # "rawtext" means the raw text stored in revlog data, which
3267 # could be retrieved by "rawdata(rev)". "text"
3276 # could be retrieved by "rawdata(rev)". "text"
3268 # mentioned below is "revision(rev)".
3277 # mentioned below is "revision(rev)".
3269 #
3278 #
3270 # There are 3 different lengths stored physically:
3279 # There are 3 different lengths stored physically:
3271 # 1. L1: rawsize, stored in revlog index
3280 # 1. L1: rawsize, stored in revlog index
3272 # 2. L2: len(rawtext), stored in revlog data
3281 # 2. L2: len(rawtext), stored in revlog data
3273 # 3. L3: len(text), stored in revlog data if flags==0, or
3282 # 3. L3: len(text), stored in revlog data if flags==0, or
3274 # possibly somewhere else if flags!=0
3283 # possibly somewhere else if flags!=0
3275 #
3284 #
3276 # L1 should be equal to L2. L3 could be different from them.
3285 # L1 should be equal to L2. L3 could be different from them.
3277 # "text" may or may not affect commit hash depending on flag
3286 # "text" may or may not affect commit hash depending on flag
3278 # processors (see flagutil.addflagprocessor).
3287 # processors (see flagutil.addflagprocessor).
3279 #
3288 #
3280 # | common | rename | meta | ext
3289 # | common | rename | meta | ext
3281 # -------------------------------------------------
3290 # -------------------------------------------------
3282 # rawsize() | L1 | L1 | L1 | L1
3291 # rawsize() | L1 | L1 | L1 | L1
3283 # size() | L1 | L2-LM | L1(*) | L1 (?)
3292 # size() | L1 | L2-LM | L1(*) | L1 (?)
3284 # len(rawtext) | L2 | L2 | L2 | L2
3293 # len(rawtext) | L2 | L2 | L2 | L2
3285 # len(text) | L2 | L2 | L2 | L3
3294 # len(text) | L2 | L2 | L2 | L3
3286 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3295 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3287 #
3296 #
3288 # LM: length of metadata, depending on rawtext
3297 # LM: length of metadata, depending on rawtext
3289 # (*): not ideal, see comment in filelog.size
3298 # (*): not ideal, see comment in filelog.size
3290 # (?): could be "- len(meta)" if the resolved content has
3299 # (?): could be "- len(meta)" if the resolved content has
3291 # rename metadata
3300 # rename metadata
3292 #
3301 #
3293 # Checks needed to be done:
3302 # Checks needed to be done:
3294 # 1. length check: L1 == L2, in all cases.
3303 # 1. length check: L1 == L2, in all cases.
3295 # 2. hash check: depending on flag processor, we may need to
3304 # 2. hash check: depending on flag processor, we may need to
3296 # use either "text" (external), or "rawtext" (in revlog).
3305 # use either "text" (external), or "rawtext" (in revlog).
3297
3306
3298 try:
3307 try:
3299 skipflags = state.get(b'skipflags', 0)
3308 skipflags = state.get(b'skipflags', 0)
3300 if skipflags:
3309 if skipflags:
3301 skipflags &= self.flags(rev)
3310 skipflags &= self.flags(rev)
3302
3311
3303 _verify_revision(self, skipflags, state, node)
3312 _verify_revision(self, skipflags, state, node)
3304
3313
3305 l1 = self.rawsize(rev)
3314 l1 = self.rawsize(rev)
3306 l2 = len(self.rawdata(node))
3315 l2 = len(self.rawdata(node))
3307
3316
3308 if l1 != l2:
3317 if l1 != l2:
3309 yield revlogproblem(
3318 yield revlogproblem(
3310 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3319 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3311 node=node,
3320 node=node,
3312 )
3321 )
3313
3322
3314 except error.CensoredNodeError:
3323 except error.CensoredNodeError:
3315 if state[b'erroroncensored']:
3324 if state[b'erroroncensored']:
3316 yield revlogproblem(
3325 yield revlogproblem(
3317 error=_(b'censored file data'), node=node
3326 error=_(b'censored file data'), node=node
3318 )
3327 )
3319 state[b'skipread'].add(node)
3328 state[b'skipread'].add(node)
3320 except Exception as e:
3329 except Exception as e:
3321 yield revlogproblem(
3330 yield revlogproblem(
3322 error=_(b'unpacking %s: %s')
3331 error=_(b'unpacking %s: %s')
3323 % (short(node), stringutil.forcebytestr(e)),
3332 % (short(node), stringutil.forcebytestr(e)),
3324 node=node,
3333 node=node,
3325 )
3334 )
3326 state[b'skipread'].add(node)
3335 state[b'skipread'].add(node)
3327
3336
3328 def storageinfo(
3337 def storageinfo(
3329 self,
3338 self,
3330 exclusivefiles=False,
3339 exclusivefiles=False,
3331 sharedfiles=False,
3340 sharedfiles=False,
3332 revisionscount=False,
3341 revisionscount=False,
3333 trackedsize=False,
3342 trackedsize=False,
3334 storedsize=False,
3343 storedsize=False,
3335 ):
3344 ):
3336 d = {}
3345 d = {}
3337
3346
3338 if exclusivefiles:
3347 if exclusivefiles:
3339 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3348 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3340 if not self._inline:
3349 if not self._inline:
3341 d[b'exclusivefiles'].append((self.opener, self._datafile))
3350 d[b'exclusivefiles'].append((self.opener, self._datafile))
3342
3351
3343 if sharedfiles:
3352 if sharedfiles:
3344 d[b'sharedfiles'] = []
3353 d[b'sharedfiles'] = []
3345
3354
3346 if revisionscount:
3355 if revisionscount:
3347 d[b'revisionscount'] = len(self)
3356 d[b'revisionscount'] = len(self)
3348
3357
3349 if trackedsize:
3358 if trackedsize:
3350 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3359 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3351
3360
3352 if storedsize:
3361 if storedsize:
3353 d[b'storedsize'] = sum(
3362 d[b'storedsize'] = sum(
3354 self.opener.stat(path).st_size for path in self.files()
3363 self.opener.stat(path).st_size for path in self.files()
3355 )
3364 )
3356
3365
3357 return d
3366 return d
3358
3367
3359 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3368 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3360 if not self.hassidedata:
3369 if not self.hassidedata:
3361 return
3370 return
3362 # revlog formats with sidedata support does not support inline
3371 # revlog formats with sidedata support does not support inline
3363 assert not self._inline
3372 assert not self._inline
3364 if not helpers[1] and not helpers[2]:
3373 if not helpers[1] and not helpers[2]:
3365 # Nothing to generate or remove
3374 # Nothing to generate or remove
3366 return
3375 return
3367
3376
3368 new_entries = []
3377 new_entries = []
3369 # append the new sidedata
3378 # append the new sidedata
3370 with self._writing(transaction):
3379 with self._writing(transaction):
3371 ifh, dfh = self._writinghandles
3380 ifh, dfh = self._writinghandles
3372 if self._docket is not None:
3381 if self._docket is not None:
3373 dfh.seek(self._docket.data_end, os.SEEK_SET)
3382 dfh.seek(self._docket.data_end, os.SEEK_SET)
3374 else:
3383 else:
3375 dfh.seek(0, os.SEEK_END)
3384 dfh.seek(0, os.SEEK_END)
3376
3385
3377 current_offset = dfh.tell()
3386 current_offset = dfh.tell()
3378 for rev in range(startrev, endrev + 1):
3387 for rev in range(startrev, endrev + 1):
3379 entry = self.index[rev]
3388 entry = self.index[rev]
3380 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3389 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3381 store=self,
3390 store=self,
3382 sidedata_helpers=helpers,
3391 sidedata_helpers=helpers,
3383 sidedata={},
3392 sidedata={},
3384 rev=rev,
3393 rev=rev,
3385 )
3394 )
3386
3395
3387 serialized_sidedata = sidedatautil.serialize_sidedata(
3396 serialized_sidedata = sidedatautil.serialize_sidedata(
3388 new_sidedata
3397 new_sidedata
3389 )
3398 )
3390
3399
3391 sidedata_compression_mode = COMP_MODE_INLINE
3400 sidedata_compression_mode = COMP_MODE_INLINE
3392 if serialized_sidedata and self.hassidedata:
3401 if serialized_sidedata and self.hassidedata:
3393 sidedata_compression_mode = COMP_MODE_PLAIN
3402 sidedata_compression_mode = COMP_MODE_PLAIN
3394 h, comp_sidedata = self.compress(serialized_sidedata)
3403 h, comp_sidedata = self.compress(serialized_sidedata)
3395 if (
3404 if (
3396 h != b'u'
3405 h != b'u'
3397 and comp_sidedata[0] != b'\0'
3406 and comp_sidedata[0] != b'\0'
3398 and len(comp_sidedata) < len(serialized_sidedata)
3407 and len(comp_sidedata) < len(serialized_sidedata)
3399 ):
3408 ):
3400 assert not h
3409 assert not h
3401 if (
3410 if (
3402 comp_sidedata[0]
3411 comp_sidedata[0]
3403 == self._docket.default_compression_header
3412 == self._docket.default_compression_header
3404 ):
3413 ):
3405 sidedata_compression_mode = COMP_MODE_DEFAULT
3414 sidedata_compression_mode = COMP_MODE_DEFAULT
3406 serialized_sidedata = comp_sidedata
3415 serialized_sidedata = comp_sidedata
3407 else:
3416 else:
3408 sidedata_compression_mode = COMP_MODE_INLINE
3417 sidedata_compression_mode = COMP_MODE_INLINE
3409 serialized_sidedata = comp_sidedata
3418 serialized_sidedata = comp_sidedata
3410 if entry[8] != 0 or entry[9] != 0:
3419 if entry[8] != 0 or entry[9] != 0:
3411 # rewriting entries that already have sidedata is not
3420 # rewriting entries that already have sidedata is not
3412 # supported yet, because it introduces garbage data in the
3421 # supported yet, because it introduces garbage data in the
3413 # revlog.
3422 # revlog.
3414 msg = b"rewriting existing sidedata is not supported yet"
3423 msg = b"rewriting existing sidedata is not supported yet"
3415 raise error.Abort(msg)
3424 raise error.Abort(msg)
3416
3425
3417 # Apply (potential) flags to add and to remove after running
3426 # Apply (potential) flags to add and to remove after running
3418 # the sidedata helpers
3427 # the sidedata helpers
3419 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3428 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3420 entry_update = (
3429 entry_update = (
3421 current_offset,
3430 current_offset,
3422 len(serialized_sidedata),
3431 len(serialized_sidedata),
3423 new_offset_flags,
3432 new_offset_flags,
3424 sidedata_compression_mode,
3433 sidedata_compression_mode,
3425 )
3434 )
3426
3435
3427 # the sidedata computation might have move the file cursors around
3436 # the sidedata computation might have move the file cursors around
3428 dfh.seek(current_offset, os.SEEK_SET)
3437 dfh.seek(current_offset, os.SEEK_SET)
3429 dfh.write(serialized_sidedata)
3438 dfh.write(serialized_sidedata)
3430 new_entries.append(entry_update)
3439 new_entries.append(entry_update)
3431 current_offset += len(serialized_sidedata)
3440 current_offset += len(serialized_sidedata)
3432 if self._docket is not None:
3441 if self._docket is not None:
3433 self._docket.data_end = dfh.tell()
3442 self._docket.data_end = dfh.tell()
3434
3443
3435 # rewrite the new index entries
3444 # rewrite the new index entries
3436 ifh.seek(startrev * self.index.entry_size)
3445 ifh.seek(startrev * self.index.entry_size)
3437 for i, e in enumerate(new_entries):
3446 for i, e in enumerate(new_entries):
3438 rev = startrev + i
3447 rev = startrev + i
3439 self.index.replace_sidedata_info(rev, *e)
3448 self.index.replace_sidedata_info(rev, *e)
3440 packed = self.index.entry_binary(rev)
3449 packed = self.index.entry_binary(rev)
3441 if rev == 0 and self._docket is None:
3450 if rev == 0 and self._docket is None:
3442 header = self._format_flags | self._format_version
3451 header = self._format_flags | self._format_version
3443 header = self.index.pack_header(header)
3452 header = self.index.pack_header(header)
3444 packed = header + packed
3453 packed = header + packed
3445 ifh.write(packed)
3454 ifh.write(packed)
@@ -1,190 +1,204 b''
1 # revlogdeltas.py - constant used for revlog logic.
1 # revlogdeltas.py - constant used for revlog logic.
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2018 Octobus <contact@octobus.net>
4 # Copyright 2018 Octobus <contact@octobus.net>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8 """Helper class to compute deltas stored inside revlogs"""
8 """Helper class to compute deltas stored inside revlogs"""
9
9
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import struct
12 import struct
13
13
14 from ..interfaces import repository
14 from ..interfaces import repository
15
15
16 ### Internal utily constants
16 ### Internal utily constants
17
17
18 KIND_CHANGELOG = 1001 # over 256 to not be comparable with a bytes
18 KIND_CHANGELOG = 1001 # over 256 to not be comparable with a bytes
19 KIND_MANIFESTLOG = 1002
19 KIND_MANIFESTLOG = 1002
20 KIND_FILELOG = 1003
20 KIND_FILELOG = 1003
21 KIND_OTHER = 1004
21 KIND_OTHER = 1004
22
22
23 ALL_KINDS = {
23 ALL_KINDS = {
24 KIND_CHANGELOG,
24 KIND_CHANGELOG,
25 KIND_MANIFESTLOG,
25 KIND_MANIFESTLOG,
26 KIND_FILELOG,
26 KIND_FILELOG,
27 KIND_OTHER,
27 KIND_OTHER,
28 }
28 }
29
29
30 ### main revlog header
30 ### main revlog header
31
31
32 INDEX_HEADER = struct.Struct(b">I")
32 INDEX_HEADER = struct.Struct(b">I")
33
33
34 ## revlog version
34 ## revlog version
35 REVLOGV0 = 0
35 REVLOGV0 = 0
36 REVLOGV1 = 1
36 REVLOGV1 = 1
37 # Dummy value until file format is finalized.
37 # Dummy value until file format is finalized.
38 REVLOGV2 = 0xDEAD
38 REVLOGV2 = 0xDEAD
39 # Dummy value until file format is finalized.
39 # Dummy value until file format is finalized.
40 CHANGELOGV2 = 0xD34D
40 CHANGELOGV2 = 0xD34D
41
41
42 ## global revlog header flags
42 ## global revlog header flags
43 # Shared across v1 and v2.
43 # Shared across v1 and v2.
44 FLAG_INLINE_DATA = 1 << 16
44 FLAG_INLINE_DATA = 1 << 16
45 # Only used by v1, implied by v2.
45 # Only used by v1, implied by v2.
46 FLAG_GENERALDELTA = 1 << 17
46 FLAG_GENERALDELTA = 1 << 17
47 REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
47 REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
48 REVLOG_DEFAULT_FORMAT = REVLOGV1
48 REVLOG_DEFAULT_FORMAT = REVLOGV1
49 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
49 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
50 REVLOGV0_FLAGS = 0
50 REVLOGV0_FLAGS = 0
51 REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
51 REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
52 REVLOGV2_FLAGS = FLAG_INLINE_DATA
52 REVLOGV2_FLAGS = FLAG_INLINE_DATA
53 CHANGELOGV2_FLAGS = 0
53 CHANGELOGV2_FLAGS = 0
54
54
55 ### individual entry
55 ### individual entry
56
56
57 ## index v0:
57 ## index v0:
58 # 4 bytes: offset
58 # 4 bytes: offset
59 # 4 bytes: compressed length
59 # 4 bytes: compressed length
60 # 4 bytes: base rev
60 # 4 bytes: base rev
61 # 4 bytes: link rev
61 # 4 bytes: link rev
62 # 20 bytes: parent 1 nodeid
62 # 20 bytes: parent 1 nodeid
63 # 20 bytes: parent 2 nodeid
63 # 20 bytes: parent 2 nodeid
64 # 20 bytes: nodeid
64 # 20 bytes: nodeid
65 INDEX_ENTRY_V0 = struct.Struct(b">4l20s20s20s")
65 INDEX_ENTRY_V0 = struct.Struct(b">4l20s20s20s")
66
66
67 ## index v1
67 ## index v1
68 # 6 bytes: offset
68 # 6 bytes: offset
69 # 2 bytes: flags
69 # 2 bytes: flags
70 # 4 bytes: compressed length
70 # 4 bytes: compressed length
71 # 4 bytes: uncompressed length
71 # 4 bytes: uncompressed length
72 # 4 bytes: base rev
72 # 4 bytes: base rev
73 # 4 bytes: link rev
73 # 4 bytes: link rev
74 # 4 bytes: parent 1 rev
74 # 4 bytes: parent 1 rev
75 # 4 bytes: parent 2 rev
75 # 4 bytes: parent 2 rev
76 # 32 bytes: nodeid
76 # 32 bytes: nodeid
77 INDEX_ENTRY_V1 = struct.Struct(b">Qiiiiii20s12x")
77 INDEX_ENTRY_V1 = struct.Struct(b">Qiiiiii20s12x")
78 assert INDEX_ENTRY_V1.size == 32 * 2
78 assert INDEX_ENTRY_V1.size == 32 * 2
79
79
80 # 6 bytes: offset
80 # 6 bytes: offset
81 # 2 bytes: flags
81 # 2 bytes: flags
82 # 4 bytes: compressed length
82 # 4 bytes: compressed length
83 # 4 bytes: uncompressed length
83 # 4 bytes: uncompressed length
84 # 4 bytes: base rev
84 # 4 bytes: base rev
85 # 4 bytes: link rev
85 # 4 bytes: link rev
86 # 4 bytes: parent 1 rev
86 # 4 bytes: parent 1 rev
87 # 4 bytes: parent 2 rev
87 # 4 bytes: parent 2 rev
88 # 32 bytes: nodeid
88 # 32 bytes: nodeid
89 # 8 bytes: sidedata offset
89 # 8 bytes: sidedata offset
90 # 4 bytes: sidedata compressed length
90 # 4 bytes: sidedata compressed length
91 # 1 bytes: compression mode (2 lower bit are data_compression_mode)
91 # 1 bytes: compression mode (2 lower bit are data_compression_mode)
92 # 19 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page)
92 # 19 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page)
93 INDEX_ENTRY_V2 = struct.Struct(b">Qiiiiii20s12xQiB19x")
93 INDEX_ENTRY_V2 = struct.Struct(b">Qiiiiii20s12xQiB19x")
94 assert INDEX_ENTRY_V2.size == 32 * 3, INDEX_ENTRY_V2.size
94 assert INDEX_ENTRY_V2.size == 32 * 3, INDEX_ENTRY_V2.size
95
95
96 # 6 bytes: offset
97 # 2 bytes: flags
98 # 4 bytes: compressed length
99 # 4 bytes: uncompressed length
100 # 4 bytes: parent 1 rev
101 # 4 bytes: parent 2 rev
102 # 32 bytes: nodeid
103 # 8 bytes: sidedata offset
104 # 4 bytes: sidedata compressed length
105 # 1 bytes: compression mode (2 lower bit are data_compression_mode)
106 # 27 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page)
107 INDEX_ENTRY_CL_V2 = struct.Struct(b">Qiiii20s12xQiB27x")
108 assert INDEX_ENTRY_CL_V2.size == 32 * 3, INDEX_ENTRY_V2.size
109
96 # revlog index flags
110 # revlog index flags
97
111
98 # For historical reasons, revlog's internal flags were exposed via the
112 # For historical reasons, revlog's internal flags were exposed via the
99 # wire protocol and are even exposed in parts of the storage APIs.
113 # wire protocol and are even exposed in parts of the storage APIs.
100
114
101 # revision has censor metadata, must be verified
115 # revision has censor metadata, must be verified
102 REVIDX_ISCENSORED = repository.REVISION_FLAG_CENSORED
116 REVIDX_ISCENSORED = repository.REVISION_FLAG_CENSORED
103 # revision hash does not match data (narrowhg)
117 # revision hash does not match data (narrowhg)
104 REVIDX_ELLIPSIS = repository.REVISION_FLAG_ELLIPSIS
118 REVIDX_ELLIPSIS = repository.REVISION_FLAG_ELLIPSIS
105 # revision data is stored externally
119 # revision data is stored externally
106 REVIDX_EXTSTORED = repository.REVISION_FLAG_EXTSTORED
120 REVIDX_EXTSTORED = repository.REVISION_FLAG_EXTSTORED
107 # revision changes files in a way that could affect copy tracing.
121 # revision changes files in a way that could affect copy tracing.
108 REVIDX_HASCOPIESINFO = repository.REVISION_FLAG_HASCOPIESINFO
122 REVIDX_HASCOPIESINFO = repository.REVISION_FLAG_HASCOPIESINFO
109 REVIDX_DEFAULT_FLAGS = 0
123 REVIDX_DEFAULT_FLAGS = 0
110 # stable order in which flags need to be processed and their processors applied
124 # stable order in which flags need to be processed and their processors applied
111 REVIDX_FLAGS_ORDER = [
125 REVIDX_FLAGS_ORDER = [
112 REVIDX_ISCENSORED,
126 REVIDX_ISCENSORED,
113 REVIDX_ELLIPSIS,
127 REVIDX_ELLIPSIS,
114 REVIDX_EXTSTORED,
128 REVIDX_EXTSTORED,
115 REVIDX_HASCOPIESINFO,
129 REVIDX_HASCOPIESINFO,
116 ]
130 ]
117
131
118 # bitmark for flags that could cause rawdata content change
132 # bitmark for flags that could cause rawdata content change
119 REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED
133 REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED
120
134
## Chunk compression mode constants:
# Revlog format version >= 2 records, for every chunk, which compression
# scheme was used to store it. The possible modes are:

# The chunk is not compressed at all; the bytes on disk are the chunk value
# itself and can be used directly, with no header prefix.
COMP_MODE_PLAIN = 0

# The chunk uses the revlog's "default compression" (usually recorded in the
# revlog docket). A header byte is still written before the data.
#
# XXX: keeping a header is probably not useful and we should probably drop it.
#
# XXX: the value of allowing mixed compression types within one revlog is
# unclear; we should consider making PLAIN/DEFAULT the only available modes
# for revlog v2, disallowing INLINE mode.
COMP_MODE_DEFAULT = 1

# The chunk's compression scheme is stored "inline" at the start of the chunk
# itself. This is the only mode ever used by revlog versions "0" and "1".
COMP_MODE_INLINE = 2
142
156
# Map each on-disk revlog format version to the set of index header flags
# that version supports.
SUPPORTED_FLAGS = {
    REVLOGV0: REVLOGV0_FLAGS,
    REVLOGV1: REVLOGV1_FLAGS,
    REVLOGV2: REVLOGV2_FLAGS,
    CHANGELOGV2: CHANGELOGV2_FLAGS,
}
149
163
150 _no = lambda flags: False
164 _no = lambda flags: False
151 _yes = lambda flags: True
165 _yes = lambda flags: True
152
166
153
167
154 def _from_flag(flag):
168 def _from_flag(flag):
155 return lambda flags: bool(flags & flag)
169 return lambda flags: bool(flags & flag)
156
170
157
171
# Per-format feature table.
#
# For each revlog version, maps a feature name to either a predicate taking
# the index header flags (for b'inline' and b'generaldelta') or a plain
# boolean fixed for that version (for b'sidedata' and b'docket').
FEATURES_BY_VERSION = {
    REVLOGV0: {
        b'inline': _no,
        b'generaldelta': _no,
        b'sidedata': False,
        b'docket': False,
    },
    REVLOGV1: {
        b'inline': _from_flag(FLAG_INLINE_DATA),
        b'generaldelta': _from_flag(FLAG_GENERALDELTA),
        b'sidedata': False,
        b'docket': False,
    },
    REVLOGV2: {
        # The point of an inline revlog is to reduce the number of files in
        # the store. Using a docket defeats that purpose, so we need other
        # means to reduce the file count for revlogv2.
        b'inline': _no,
        b'generaldelta': _yes,
        b'sidedata': True,
        b'docket': True,
    },
    CHANGELOGV2: {
        b'inline': _no,
        # General delta is useless for the changelog since we don't do any
        # delta there.
        b'generaldelta': _no,
        b'sidedata': True,
        b'docket': True,
    },
}
188
202
189
203
# Upper bound on delta-chain length used by the sparse-revlog logic.
SPARSE_REVLOG_MAX_CHAIN_LENGTH = 1000
General Comments 0
You need to be logged in to leave comments. Login now