# Scraped changeset-viewer header, preserved as a comment:
# revlog: store sidedata in their own file
# changeset r48181:e6292eb3 (marmoute), branch: default
# configitems.py - centralized declaration of configuration option
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
7
7
from __future__ import absolute_import

import functools
import re

from . import (
    encoding,
    error,
)
17
17
18
18
def loadconfigtable(ui, extname, configtable):
    """update config item known to the ui with the extension ones

    :ui: the ui instance whose ``_knownconfig`` registry is extended,
    :extname: name of the extension providing the items (used in warnings),
    :configtable: mapping of section name to a dict of config items.

    Emits a ``warn-config`` devel warning for every item the extension
    redefines over an already-known one, then records all items.
    """
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            msg = b"extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config=b'warn-config')

        knownitems.update(items)
31
31
32
32
class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, match name using regular expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        # private copy so the shared default tuple is never mutated
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        self._re = None
        if generic:
            # generic items match candidate names by regular expression
            self._re = re.compile(self.name)
63
63
64
64
class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        # generic (regex-matched) items, tracked separately for lookup
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        baseitem = super(itemregister, self).get(key)
        if baseitem is not None and not baseitem.generic:
            return baseitem

        # search for a matching generic item
        generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
        for item in generics:
            # we use 'match' instead of 'search' to make the matching simpler
            # for people unfamiliar with regular expression. Having the match
            # rooted to the start of the string will produce less surprising
            # result for user writing simple regex for sub-attribute.
            #
            # For example using "color\..*" match produces an unsurprising
            # result, while using search could suddenly match apparently
            # unrelated configuration that happens to contains "color."
            # anywhere. This is a tradeoff where we favor requiring ".*" on
            # some match to avoid the need to prefix most pattern with "^".
            # The "^" seems more error prone.
            if item._re.match(key):
                return item

        return None
104
104
105
105
# registry of all core (non-extension) config items, keyed by section
coreitems = {}


def _register(configtable, *args, **kwargs):
    """Build a configitem from ``args``/``kwargs`` and record it.

    The item is stored in ``configtable`` under its section's
    itemregister. Raises ``error.ProgrammingError`` when the same
    section/name pair is registered twice.
    """
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = b"duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item
116
116
117
117
# special value for case where the default is derived from other values
dynamicdefault = object()

# Registering actual config items


def getitemregister(configtable):
    """Return a registration function bound to ``configtable``.

    The returned callable has the same signature as ``_register``
    minus the table argument, and carries ``dynamicdefault`` as an
    attribute for convenience.
    """
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f


coreconfigitem = getitemregister(coreitems)
132
132
133
133
def _registerdiffopts(section, configprefix=b''):
    """Register the standard set of diff options under ``section``.

    :section: config section receiving the options,
    :configprefix: optional byte-string prefix for each option name
        (e.g. ``b'commit.interactive.'``).
    """
    # All diff options default to False except 'unified', whose None
    # default means "use the diff machinery's own context size".
    for name, default in [
        (b'nodates', False),
        (b'showfunc', False),
        (b'unified', None),
        (b'git', False),
        (b'ignorews', False),
        (b'ignorewsamount', False),
        (b'ignoreblanklines', False),
        (b'ignorewseol', False),
        (b'nobinary', False),
        (b'noprefix', False),
        (b'word-diff', False),
    ]:
        coreconfigitem(
            section,
            configprefix + name,
            default=default,
        )
190
190
191
191
coreconfigitem(
    b'alias',
    b'.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'auth',
    b'cookiefile',
    default=None,
)
_registerdiffopts(section=b'annotate')
# bookmarks.pushing: internal hack for discovery
coreconfigitem(
    b'bookmarks',
    b'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem(
    b'bundle',
    b'mainreporoot',
    default=b'',
)
coreconfigitem(
    b'censor',
    b'policy',
    default=b'abort',
    experimental=True,
)
coreconfigitem(
    b'chgserver',
    b'idletimeout',
    default=3600,
)
coreconfigitem(
    b'chgserver',
    b'skiphash',
    default=False,
)
coreconfigitem(
    b'cmdserver',
    b'log',
    default=None,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-files',
    default=7,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-size',
    default=b'1 MB',
)
coreconfigitem(
    b'cmdserver',
    b'max-repo-cache',
    default=0,
    experimental=True,
)
coreconfigitem(
    b'cmdserver',
    b'message-encodings',
    default=list,
)
coreconfigitem(
    b'cmdserver',
    b'track-log',
    default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
)
coreconfigitem(
    b'cmdserver',
    b'shutdown-on-interrupt',
    default=True,
)
coreconfigitem(
    b'color',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'color',
    b'mode',
    default=b'auto',
)
coreconfigitem(
    b'color',
    b'pagermode',
    default=dynamicdefault,
)
coreconfigitem(
    b'command-templates',
    b'graphnode',
    default=None,
    alias=[(b'ui', b'graphnodetemplate')],
)
coreconfigitem(
    b'command-templates',
    b'log',
    default=None,
    alias=[(b'ui', b'logtemplate')],
)
coreconfigitem(
    b'command-templates',
    b'mergemarker',
    default=(
        b'{node|short} '
        b'{ifeq(tags, "tip", "", '
        b'ifeq(tags, "", "", "{tags} "))}'
        b'{if(bookmarks, "{bookmarks} ")}'
        b'{ifeq(branch, "default", "", "{branch} ")}'
        b'- {author|user}: {desc|firstline}'
    ),
    alias=[(b'ui', b'mergemarkertemplate')],
)
coreconfigitem(
    b'command-templates',
    b'pre-merge-tool-output',
    default=None,
    alias=[(b'ui', b'pre-merge-tool-output-template')],
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary',
    default=None,
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary.*',
    default=dynamicdefault,
    generic=True,
)
_registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
coreconfigitem(
    b'commands',
    b'commit.post-status',
    default=False,
)
coreconfigitem(
    b'commands',
    b'grep.all-files',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'merge.require-rev',
    default=False,
)
coreconfigitem(
    b'commands',
    b'push.require-revs',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.confirm',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.explicit-re-merge',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.mark-check',
    default=b'none',
)
_registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
coreconfigitem(
    b'commands',
    b'show.aliasprefix',
    default=list,
)
coreconfigitem(
    b'commands',
    b'status.relative',
    default=False,
)
coreconfigitem(
    b'commands',
    b'status.skipstates',
    default=[],
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'status.terse',
    default=b'',
)
coreconfigitem(
    b'commands',
    b'status.verbose',
    default=False,
)
coreconfigitem(
    b'commands',
    b'update.check',
    default=None,
)
coreconfigitem(
    b'commands',
    b'update.requiredest',
    default=False,
)
coreconfigitem(
    b'committemplate',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'convert',
    b'bzr.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.cache',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.fuzz',
    default=60,
)
coreconfigitem(
    b'convert',
    b'cvsps.logencoding',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergefrom',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergeto',
    default=None,
)
coreconfigitem(
    b'convert',
    b'git.committeractions',
    default=lambda: [b'messagedifferent'],
)
coreconfigitem(
    b'convert',
    b'git.extrakeys',
    default=list,
)
coreconfigitem(
    b'convert',
    b'git.findcopiesharder',
    default=False,
)
coreconfigitem(
    b'convert',
    b'git.remoteprefix',
    default=b'remote',
)
coreconfigitem(
    b'convert',
    b'git.renamelimit',
    default=400,
)
coreconfigitem(
    b'convert',
    b'git.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'git.similarity',
    default=50,
)
coreconfigitem(
    b'convert',
    b'git.skipsubmodules',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.clonebranches',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.ignoreerrors',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.preserve-hash',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.revs',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.saverev',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.sourcename',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.startrev',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.tagsbranch',
    default=b'default',
)
coreconfigitem(
    b'convert',
    b'hg.usebranchnames',
    default=True,
)
coreconfigitem(
    b'convert',
    b'ignoreancestorcheck',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'convert',
    b'localtimezone',
    default=False,
)
coreconfigitem(
    b'convert',
    b'p4.encoding',
    default=dynamicdefault,
)
coreconfigitem(
    b'convert',
    b'p4.startrev',
    default=0,
)
coreconfigitem(
    b'convert',
    b'skiptags',
    default=False,
)
coreconfigitem(
    b'convert',
    b'svn.debugsvnlog',
    default=True,
)
coreconfigitem(
    b'convert',
    b'svn.trunk',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.tags',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.branches',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.startrev',
    default=0,
)
coreconfigitem(
    b'convert',
    b'svn.dangerous-set-commit-dates',
    default=False,
)
coreconfigitem(
    b'debug',
    b'dirstate.delaywrite',
    default=0,
)
coreconfigitem(
    b'debug',
    b'revlog.verifyposition.changelog',
    default=b'',
)
coreconfigitem(
    b'defaults',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'devel',
    b'all-warnings',
    default=False,
)
coreconfigitem(
    b'devel',
    b'bundle2.debug',
    default=False,
)
coreconfigitem(
    b'devel',
    b'bundle.delta',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'cache-vfs',
    default=None,
)
coreconfigitem(
    b'devel',
    b'check-locks',
    default=False,
)
coreconfigitem(
    b'devel',
    b'check-relroot',
    default=False,
)
# Track copy information for all file, not just "added" one (very slow)
coreconfigitem(
    b'devel',
    b'copy-tracing.trace-all-files',
    default=False,
)
coreconfigitem(
    b'devel',
    b'default-date',
    default=None,
)
coreconfigitem(
    b'devel',
    b'deprec-warn',
    default=False,
)
coreconfigitem(
    b'devel',
    b'disableloaddefaultcerts',
    default=False,
)
coreconfigitem(
    b'devel',
    b'warn-empty-changegroup',
    default=False,
)
coreconfigitem(
    b'devel',
    b'legacy.exchange',
    default=list,
)
# When True, revlogs use a special reference version of the nodemap, that is not
# performant but is "known" to behave properly.
coreconfigitem(
    b'devel',
    b'persistent-nodemap',
    default=False,
)
coreconfigitem(
    b'devel',
    b'servercafile',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'serverexactprotocol',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'serverrequirecert',
    default=False,
)
coreconfigitem(
    b'devel',
    b'strip-obsmarkers',
    default=True,
)
coreconfigitem(
    b'devel',
    b'warn-config',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-default',
    default=None,
)
coreconfigitem(
    b'devel',
    b'user.obsmarker',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-unknown',
    default=None,
)
coreconfigitem(
    b'devel',
    b'debug.copies',
    default=False,
)
coreconfigitem(
    b'devel',
    b'copy-tracing.multi-thread',
    default=True,
)
coreconfigitem(
    b'devel',
    b'debug.extensions',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.repo-filters',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.peer-request',
    default=False,
)
# If discovery.exchange-heads is False, the discovery will not start with
# remote head fetching and local head querying.
coreconfigitem(
    b'devel',
    b'discovery.exchange-heads',
    default=True,
)
# If discovery.grow-sample is False, the sample size used in set discovery will
# not be increased through the process
coreconfigitem(
    b'devel',
    b'discovery.grow-sample',
    default=True,
)
# When discovery.grow-sample.dynamic is True, the default, the sample size is
# adapted to the shape of the undecided set (it is set to the max of:
# <target-size>, len(roots(undecided)), len(heads(undecided)
coreconfigitem(
    b'devel',
    b'discovery.grow-sample.dynamic',
    default=True,
)
# discovery.grow-sample.rate control the rate at which the sample grow
coreconfigitem(
    b'devel',
    b'discovery.grow-sample.rate',
    default=1.05,
)
# If discovery.randomize is False, random sampling during discovery are
# deterministic. It is meant for integration tests.
coreconfigitem(
    b'devel',
    b'discovery.randomize',
    default=True,
)
# Control the initial size of the discovery sample
coreconfigitem(
    b'devel',
    b'discovery.sample-size',
    default=200,
)
# Control the initial size of the discovery for initial change
coreconfigitem(
    b'devel',
    b'discovery.sample-size.initial',
    default=100,
)
_registerdiffopts(section=b'diff')
coreconfigitem(
    b'diff',
    b'merge',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'email',
    b'bcc',
    default=None,
)
coreconfigitem(
    b'email',
    b'cc',
    default=None,
)
coreconfigitem(
    b'email',
    b'charsets',
    default=list,
)
coreconfigitem(
    b'email',
    b'from',
    default=None,
)
coreconfigitem(
    b'email',
    b'method',
    default=b'smtp',
)
coreconfigitem(
    b'email',
    b'reply-to',
    default=None,
)
coreconfigitem(
    b'email',
    b'to',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'archivemetatemplate',
    default=dynamicdefault,
)
820 coreconfigitem(
820 coreconfigitem(
821 b'experimental',
821 b'experimental',
822 b'auto-publish',
822 b'auto-publish',
823 default=b'publish',
823 default=b'publish',
824 )
824 )
825 coreconfigitem(
825 coreconfigitem(
826 b'experimental',
826 b'experimental',
827 b'bundle-phases',
827 b'bundle-phases',
828 default=False,
828 default=False,
829 )
829 )
830 coreconfigitem(
830 coreconfigitem(
831 b'experimental',
831 b'experimental',
832 b'bundle2-advertise',
832 b'bundle2-advertise',
833 default=True,
833 default=True,
834 )
834 )
835 coreconfigitem(
835 coreconfigitem(
836 b'experimental',
836 b'experimental',
837 b'bundle2-output-capture',
837 b'bundle2-output-capture',
838 default=False,
838 default=False,
839 )
839 )
840 coreconfigitem(
840 coreconfigitem(
841 b'experimental',
841 b'experimental',
842 b'bundle2.pushback',
842 b'bundle2.pushback',
843 default=False,
843 default=False,
844 )
844 )
845 coreconfigitem(
845 coreconfigitem(
846 b'experimental',
846 b'experimental',
847 b'bundle2lazylocking',
847 b'bundle2lazylocking',
848 default=False,
848 default=False,
849 )
849 )
850 coreconfigitem(
850 coreconfigitem(
851 b'experimental',
851 b'experimental',
852 b'bundlecomplevel',
852 b'bundlecomplevel',
853 default=None,
853 default=None,
854 )
854 )
855 coreconfigitem(
855 coreconfigitem(
856 b'experimental',
856 b'experimental',
857 b'bundlecomplevel.bzip2',
857 b'bundlecomplevel.bzip2',
858 default=None,
858 default=None,
859 )
859 )
860 coreconfigitem(
860 coreconfigitem(
861 b'experimental',
861 b'experimental',
862 b'bundlecomplevel.gzip',
862 b'bundlecomplevel.gzip',
863 default=None,
863 default=None,
864 )
864 )
865 coreconfigitem(
865 coreconfigitem(
866 b'experimental',
866 b'experimental',
867 b'bundlecomplevel.none',
867 b'bundlecomplevel.none',
868 default=None,
868 default=None,
869 )
869 )
870 coreconfigitem(
870 coreconfigitem(
871 b'experimental',
871 b'experimental',
872 b'bundlecomplevel.zstd',
872 b'bundlecomplevel.zstd',
873 default=None,
873 default=None,
874 )
874 )
875 coreconfigitem(
875 coreconfigitem(
876 b'experimental',
876 b'experimental',
877 b'bundlecompthreads',
877 b'bundlecompthreads',
878 default=None,
878 default=None,
879 )
879 )
880 coreconfigitem(
880 coreconfigitem(
881 b'experimental',
881 b'experimental',
882 b'bundlecompthreads.bzip2',
882 b'bundlecompthreads.bzip2',
883 default=None,
883 default=None,
884 )
884 )
885 coreconfigitem(
885 coreconfigitem(
886 b'experimental',
886 b'experimental',
887 b'bundlecompthreads.gzip',
887 b'bundlecompthreads.gzip',
888 default=None,
888 default=None,
889 )
889 )
890 coreconfigitem(
890 coreconfigitem(
891 b'experimental',
891 b'experimental',
892 b'bundlecompthreads.none',
892 b'bundlecompthreads.none',
893 default=None,
893 default=None,
894 )
894 )
895 coreconfigitem(
895 coreconfigitem(
896 b'experimental',
896 b'experimental',
897 b'bundlecompthreads.zstd',
897 b'bundlecompthreads.zstd',
898 default=None,
898 default=None,
899 )
899 )
900 coreconfigitem(
900 coreconfigitem(
901 b'experimental',
901 b'experimental',
902 b'changegroup3',
902 b'changegroup3',
903 default=False,
903 default=False,
904 )
904 )
905 coreconfigitem(
905 coreconfigitem(
906 b'experimental',
906 b'experimental',
907 b'changegroup4',
907 b'changegroup4',
908 default=False,
908 default=False,
909 )
909 )
910 coreconfigitem(
910 coreconfigitem(
911 b'experimental',
911 b'experimental',
912 b'cleanup-as-archived',
912 b'cleanup-as-archived',
913 default=False,
913 default=False,
914 )
914 )
915 coreconfigitem(
915 coreconfigitem(
916 b'experimental',
916 b'experimental',
917 b'clientcompressionengines',
917 b'clientcompressionengines',
918 default=list,
918 default=list,
919 )
919 )
920 coreconfigitem(
920 coreconfigitem(
921 b'experimental',
921 b'experimental',
922 b'copytrace',
922 b'copytrace',
923 default=b'on',
923 default=b'on',
924 )
924 )
925 coreconfigitem(
925 coreconfigitem(
926 b'experimental',
926 b'experimental',
927 b'copytrace.movecandidateslimit',
927 b'copytrace.movecandidateslimit',
928 default=100,
928 default=100,
929 )
929 )
930 coreconfigitem(
930 coreconfigitem(
931 b'experimental',
931 b'experimental',
932 b'copytrace.sourcecommitlimit',
932 b'copytrace.sourcecommitlimit',
933 default=100,
933 default=100,
934 )
934 )
935 coreconfigitem(
935 coreconfigitem(
936 b'experimental',
936 b'experimental',
937 b'copies.read-from',
937 b'copies.read-from',
938 default=b"filelog-only",
938 default=b"filelog-only",
939 )
939 )
940 coreconfigitem(
940 coreconfigitem(
941 b'experimental',
941 b'experimental',
942 b'copies.write-to',
942 b'copies.write-to',
943 default=b'filelog-only',
943 default=b'filelog-only',
944 )
944 )
945 coreconfigitem(
945 coreconfigitem(
946 b'experimental',
946 b'experimental',
947 b'crecordtest',
947 b'crecordtest',
948 default=None,
948 default=None,
949 )
949 )
950 coreconfigitem(
950 coreconfigitem(
951 b'experimental',
951 b'experimental',
952 b'directaccess',
952 b'directaccess',
953 default=False,
953 default=False,
954 )
954 )
955 coreconfigitem(
955 coreconfigitem(
956 b'experimental',
956 b'experimental',
957 b'directaccess.revnums',
957 b'directaccess.revnums',
958 default=False,
958 default=False,
959 )
959 )
960 coreconfigitem(
960 coreconfigitem(
961 b'experimental',
961 b'experimental',
962 b'dirstate-tree.in-memory',
962 b'dirstate-tree.in-memory',
963 default=False,
963 default=False,
964 )
964 )
965 coreconfigitem(
965 coreconfigitem(
966 b'experimental',
966 b'experimental',
967 b'editortmpinhg',
967 b'editortmpinhg',
968 default=False,
968 default=False,
969 )
969 )
970 coreconfigitem(
970 coreconfigitem(
971 b'experimental',
971 b'experimental',
972 b'evolution',
972 b'evolution',
973 default=list,
973 default=list,
974 )
974 )
975 coreconfigitem(
975 coreconfigitem(
976 b'experimental',
976 b'experimental',
977 b'evolution.allowdivergence',
977 b'evolution.allowdivergence',
978 default=False,
978 default=False,
979 alias=[(b'experimental', b'allowdivergence')],
979 alias=[(b'experimental', b'allowdivergence')],
980 )
980 )
981 coreconfigitem(
981 coreconfigitem(
982 b'experimental',
982 b'experimental',
983 b'evolution.allowunstable',
983 b'evolution.allowunstable',
984 default=None,
984 default=None,
985 )
985 )
986 coreconfigitem(
986 coreconfigitem(
987 b'experimental',
987 b'experimental',
988 b'evolution.createmarkers',
988 b'evolution.createmarkers',
989 default=None,
989 default=None,
990 )
990 )
991 coreconfigitem(
991 coreconfigitem(
992 b'experimental',
992 b'experimental',
993 b'evolution.effect-flags',
993 b'evolution.effect-flags',
994 default=True,
994 default=True,
995 alias=[(b'experimental', b'effect-flags')],
995 alias=[(b'experimental', b'effect-flags')],
996 )
996 )
997 coreconfigitem(
997 coreconfigitem(
998 b'experimental',
998 b'experimental',
999 b'evolution.exchange',
999 b'evolution.exchange',
1000 default=None,
1000 default=None,
1001 )
1001 )
1002 coreconfigitem(
1002 coreconfigitem(
1003 b'experimental',
1003 b'experimental',
1004 b'evolution.bundle-obsmarker',
1004 b'evolution.bundle-obsmarker',
1005 default=False,
1005 default=False,
1006 )
1006 )
1007 coreconfigitem(
1007 coreconfigitem(
1008 b'experimental',
1008 b'experimental',
1009 b'evolution.bundle-obsmarker:mandatory',
1009 b'evolution.bundle-obsmarker:mandatory',
1010 default=True,
1010 default=True,
1011 )
1011 )
1012 coreconfigitem(
1012 coreconfigitem(
1013 b'experimental',
1013 b'experimental',
1014 b'log.topo',
1014 b'log.topo',
1015 default=False,
1015 default=False,
1016 )
1016 )
1017 coreconfigitem(
1017 coreconfigitem(
1018 b'experimental',
1018 b'experimental',
1019 b'evolution.report-instabilities',
1019 b'evolution.report-instabilities',
1020 default=True,
1020 default=True,
1021 )
1021 )
1022 coreconfigitem(
1022 coreconfigitem(
1023 b'experimental',
1023 b'experimental',
1024 b'evolution.track-operation',
1024 b'evolution.track-operation',
1025 default=True,
1025 default=True,
1026 )
1026 )
1027 # repo-level config to exclude a revset visibility
1027 # repo-level config to exclude a revset visibility
1028 #
1028 #
1029 # The target use case is to use `share` to expose different subset of the same
1029 # The target use case is to use `share` to expose different subset of the same
1030 # repository, especially server side. See also `server.view`.
1030 # repository, especially server side. See also `server.view`.
1031 coreconfigitem(
1031 coreconfigitem(
1032 b'experimental',
1032 b'experimental',
1033 b'extra-filter-revs',
1033 b'extra-filter-revs',
1034 default=None,
1034 default=None,
1035 )
1035 )
1036 coreconfigitem(
1036 coreconfigitem(
1037 b'experimental',
1037 b'experimental',
1038 b'maxdeltachainspan',
1038 b'maxdeltachainspan',
1039 default=-1,
1039 default=-1,
1040 )
1040 )
1041 # tracks files which were undeleted (merge might delete them but we explicitly
1041 # tracks files which were undeleted (merge might delete them but we explicitly
1042 # kept/undeleted them) and creates new filenodes for them
1042 # kept/undeleted them) and creates new filenodes for them
1043 coreconfigitem(
1043 coreconfigitem(
1044 b'experimental',
1044 b'experimental',
1045 b'merge-track-salvaged',
1045 b'merge-track-salvaged',
1046 default=False,
1046 default=False,
1047 )
1047 )
1048 coreconfigitem(
1048 coreconfigitem(
1049 b'experimental',
1049 b'experimental',
1050 b'mergetempdirprefix',
1050 b'mergetempdirprefix',
1051 default=None,
1051 default=None,
1052 )
1052 )
1053 coreconfigitem(
1053 coreconfigitem(
1054 b'experimental',
1054 b'experimental',
1055 b'mmapindexthreshold',
1055 b'mmapindexthreshold',
1056 default=None,
1056 default=None,
1057 )
1057 )
1058 coreconfigitem(
1058 coreconfigitem(
1059 b'experimental',
1059 b'experimental',
1060 b'narrow',
1060 b'narrow',
1061 default=False,
1061 default=False,
1062 )
1062 )
1063 coreconfigitem(
1063 coreconfigitem(
1064 b'experimental',
1064 b'experimental',
1065 b'nonnormalparanoidcheck',
1065 b'nonnormalparanoidcheck',
1066 default=False,
1066 default=False,
1067 )
1067 )
1068 coreconfigitem(
1068 coreconfigitem(
1069 b'experimental',
1069 b'experimental',
1070 b'exportableenviron',
1070 b'exportableenviron',
1071 default=list,
1071 default=list,
1072 )
1072 )
1073 coreconfigitem(
1073 coreconfigitem(
1074 b'experimental',
1074 b'experimental',
1075 b'extendedheader.index',
1075 b'extendedheader.index',
1076 default=None,
1076 default=None,
1077 )
1077 )
1078 coreconfigitem(
1078 coreconfigitem(
1079 b'experimental',
1079 b'experimental',
1080 b'extendedheader.similarity',
1080 b'extendedheader.similarity',
1081 default=False,
1081 default=False,
1082 )
1082 )
1083 coreconfigitem(
1083 coreconfigitem(
1084 b'experimental',
1084 b'experimental',
1085 b'graphshorten',
1085 b'graphshorten',
1086 default=False,
1086 default=False,
1087 )
1087 )
1088 coreconfigitem(
1088 coreconfigitem(
1089 b'experimental',
1089 b'experimental',
1090 b'graphstyle.parent',
1090 b'graphstyle.parent',
1091 default=dynamicdefault,
1091 default=dynamicdefault,
1092 )
1092 )
1093 coreconfigitem(
1093 coreconfigitem(
1094 b'experimental',
1094 b'experimental',
1095 b'graphstyle.missing',
1095 b'graphstyle.missing',
1096 default=dynamicdefault,
1096 default=dynamicdefault,
1097 )
1097 )
1098 coreconfigitem(
1098 coreconfigitem(
1099 b'experimental',
1099 b'experimental',
1100 b'graphstyle.grandparent',
1100 b'graphstyle.grandparent',
1101 default=dynamicdefault,
1101 default=dynamicdefault,
1102 )
1102 )
1103 coreconfigitem(
1103 coreconfigitem(
1104 b'experimental',
1104 b'experimental',
1105 b'hook-track-tags',
1105 b'hook-track-tags',
1106 default=False,
1106 default=False,
1107 )
1107 )
1108 coreconfigitem(
1108 coreconfigitem(
1109 b'experimental',
1109 b'experimental',
1110 b'httppeer.advertise-v2',
1110 b'httppeer.advertise-v2',
1111 default=False,
1111 default=False,
1112 )
1112 )
1113 coreconfigitem(
1113 coreconfigitem(
1114 b'experimental',
1114 b'experimental',
1115 b'httppeer.v2-encoder-order',
1115 b'httppeer.v2-encoder-order',
1116 default=None,
1116 default=None,
1117 )
1117 )
1118 coreconfigitem(
1118 coreconfigitem(
1119 b'experimental',
1119 b'experimental',
1120 b'httppostargs',
1120 b'httppostargs',
1121 default=False,
1121 default=False,
1122 )
1122 )
1123 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1123 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1124 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1124 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1125
1125
1126 coreconfigitem(
1126 coreconfigitem(
1127 b'experimental',
1127 b'experimental',
1128 b'obsmarkers-exchange-debug',
1128 b'obsmarkers-exchange-debug',
1129 default=False,
1129 default=False,
1130 )
1130 )
1131 coreconfigitem(
1131 coreconfigitem(
1132 b'experimental',
1132 b'experimental',
1133 b'remotenames',
1133 b'remotenames',
1134 default=False,
1134 default=False,
1135 )
1135 )
1136 coreconfigitem(
1136 coreconfigitem(
1137 b'experimental',
1137 b'experimental',
1138 b'removeemptydirs',
1138 b'removeemptydirs',
1139 default=True,
1139 default=True,
1140 )
1140 )
1141 coreconfigitem(
1141 coreconfigitem(
1142 b'experimental',
1142 b'experimental',
1143 b'revert.interactive.select-to-keep',
1143 b'revert.interactive.select-to-keep',
1144 default=False,
1144 default=False,
1145 )
1145 )
1146 coreconfigitem(
1146 coreconfigitem(
1147 b'experimental',
1147 b'experimental',
1148 b'revisions.prefixhexnode',
1148 b'revisions.prefixhexnode',
1149 default=False,
1149 default=False,
1150 )
1150 )
1151 # "out of experimental" todo list.
1151 # "out of experimental" todo list.
1152 #
1152 #
1153 # * include management of a persistent nodemap in the main docket
1153 # * include management of a persistent nodemap in the main docket
1154 # * enforce a "no-truncate" policy for mmap safety
1154 # * enforce a "no-truncate" policy for mmap safety
1155 # - for censoring operation
1155 # - for censoring operation
1156 # - for stripping operation
1156 # - for stripping operation
1157 # - for rollback operation
1157 # - for rollback operation
1158 # * proper streaming (race free) of the docket file
1158 # * proper streaming (race free) of the docket file
1159 # * track garbage data to eventually allow rewriting -existing- sidedata.
1159 # * track garbage data to eventually allow rewriting -existing- sidedata.
1160 # * Exchange-wise, we will also need to do something more efficient than
1160 # * Exchange-wise, we will also need to do something more efficient than
1161 # keeping references to the affected revlogs, especially memory-wise when
1161 # keeping references to the affected revlogs, especially memory-wise when
1162 # rewriting sidedata.
1162 # rewriting sidedata.
1163 # * introduce a proper solution to reduce the number of filelog related files.
1163 # * introduce a proper solution to reduce the number of filelog related files.
1164 # * use caching for reading sidedata (similar to what we do for data).
1164 # * use caching for reading sidedata (similar to what we do for data).
1165 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1165 # * Improvement to consider
1166 # * Improvement to consider
1166 # - avoid compression header in chunk using the default compression?
1167 # - avoid compression header in chunk using the default compression?
1167 # - forbid "inline" compression mode entirely?
1168 # - forbid "inline" compression mode entirely?
1168 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1169 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1169 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1170 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1170 # - keep track of chain base or size (probably not that useful anymore)
1171 # - keep track of chain base or size (probably not that useful anymore)
1171 # - store data and sidedata in different files
1172 coreconfigitem(
1172 coreconfigitem(
1173 b'experimental',
1173 b'experimental',
1174 b'revlogv2',
1174 b'revlogv2',
1175 default=None,
1175 default=None,
1176 )
1176 )
1177 coreconfigitem(
1177 coreconfigitem(
1178 b'experimental',
1178 b'experimental',
1179 b'revisions.disambiguatewithin',
1179 b'revisions.disambiguatewithin',
1180 default=None,
1180 default=None,
1181 )
1181 )
1182 coreconfigitem(
1182 coreconfigitem(
1183 b'experimental',
1183 b'experimental',
1184 b'rust.index',
1184 b'rust.index',
1185 default=False,
1185 default=False,
1186 )
1186 )
1187 coreconfigitem(
1187 coreconfigitem(
1188 b'experimental',
1188 b'experimental',
1189 b'server.filesdata.recommended-batch-size',
1189 b'server.filesdata.recommended-batch-size',
1190 default=50000,
1190 default=50000,
1191 )
1191 )
1192 coreconfigitem(
1192 coreconfigitem(
1193 b'experimental',
1193 b'experimental',
1194 b'server.manifestdata.recommended-batch-size',
1194 b'server.manifestdata.recommended-batch-size',
1195 default=100000,
1195 default=100000,
1196 )
1196 )
1197 coreconfigitem(
1197 coreconfigitem(
1198 b'experimental',
1198 b'experimental',
1199 b'server.stream-narrow-clones',
1199 b'server.stream-narrow-clones',
1200 default=False,
1200 default=False,
1201 )
1201 )
1202 coreconfigitem(
1202 coreconfigitem(
1203 b'experimental',
1203 b'experimental',
1204 b'single-head-per-branch',
1204 b'single-head-per-branch',
1205 default=False,
1205 default=False,
1206 )
1206 )
1207 coreconfigitem(
1207 coreconfigitem(
1208 b'experimental',
1208 b'experimental',
1209 b'single-head-per-branch:account-closed-heads',
1209 b'single-head-per-branch:account-closed-heads',
1210 default=False,
1210 default=False,
1211 )
1211 )
1212 coreconfigitem(
1212 coreconfigitem(
1213 b'experimental',
1213 b'experimental',
1214 b'single-head-per-branch:public-changes-only',
1214 b'single-head-per-branch:public-changes-only',
1215 default=False,
1215 default=False,
1216 )
1216 )
1217 coreconfigitem(
1217 coreconfigitem(
1218 b'experimental',
1218 b'experimental',
1219 b'sshserver.support-v2',
1219 b'sshserver.support-v2',
1220 default=False,
1220 default=False,
1221 )
1221 )
1222 coreconfigitem(
1222 coreconfigitem(
1223 b'experimental',
1223 b'experimental',
1224 b'sparse-read',
1224 b'sparse-read',
1225 default=False,
1225 default=False,
1226 )
1226 )
1227 coreconfigitem(
1227 coreconfigitem(
1228 b'experimental',
1228 b'experimental',
1229 b'sparse-read.density-threshold',
1229 b'sparse-read.density-threshold',
1230 default=0.50,
1230 default=0.50,
1231 )
1231 )
1232 coreconfigitem(
1232 coreconfigitem(
1233 b'experimental',
1233 b'experimental',
1234 b'sparse-read.min-gap-size',
1234 b'sparse-read.min-gap-size',
1235 default=b'65K',
1235 default=b'65K',
1236 )
1236 )
1237 coreconfigitem(
1237 coreconfigitem(
1238 b'experimental',
1238 b'experimental',
1239 b'treemanifest',
1239 b'treemanifest',
1240 default=False,
1240 default=False,
1241 )
1241 )
1242 coreconfigitem(
1242 coreconfigitem(
1243 b'experimental',
1243 b'experimental',
1244 b'update.atomic-file',
1244 b'update.atomic-file',
1245 default=False,
1245 default=False,
1246 )
1246 )
1247 coreconfigitem(
1247 coreconfigitem(
1248 b'experimental',
1248 b'experimental',
1249 b'sshpeer.advertise-v2',
1249 b'sshpeer.advertise-v2',
1250 default=False,
1250 default=False,
1251 )
1251 )
1252 coreconfigitem(
1252 coreconfigitem(
1253 b'experimental',
1253 b'experimental',
1254 b'web.apiserver',
1254 b'web.apiserver',
1255 default=False,
1255 default=False,
1256 )
1256 )
1257 coreconfigitem(
1257 coreconfigitem(
1258 b'experimental',
1258 b'experimental',
1259 b'web.api.http-v2',
1259 b'web.api.http-v2',
1260 default=False,
1260 default=False,
1261 )
1261 )
1262 coreconfigitem(
1262 coreconfigitem(
1263 b'experimental',
1263 b'experimental',
1264 b'web.api.debugreflect',
1264 b'web.api.debugreflect',
1265 default=False,
1265 default=False,
1266 )
1266 )
1267 coreconfigitem(
1267 coreconfigitem(
1268 b'experimental',
1268 b'experimental',
1269 b'worker.wdir-get-thread-safe',
1269 b'worker.wdir-get-thread-safe',
1270 default=False,
1270 default=False,
1271 )
1271 )
1272 coreconfigitem(
1272 coreconfigitem(
1273 b'experimental',
1273 b'experimental',
1274 b'worker.repository-upgrade',
1274 b'worker.repository-upgrade',
1275 default=False,
1275 default=False,
1276 )
1276 )
1277 coreconfigitem(
1277 coreconfigitem(
1278 b'experimental',
1278 b'experimental',
1279 b'xdiff',
1279 b'xdiff',
1280 default=False,
1280 default=False,
1281 )
1281 )
1282 coreconfigitem(
1282 coreconfigitem(
1283 b'extensions',
1283 b'extensions',
1284 b'.*',
1284 b'.*',
1285 default=None,
1285 default=None,
1286 generic=True,
1286 generic=True,
1287 )
1287 )
1288 coreconfigitem(
1288 coreconfigitem(
1289 b'extdata',
1289 b'extdata',
1290 b'.*',
1290 b'.*',
1291 default=None,
1291 default=None,
1292 generic=True,
1292 generic=True,
1293 )
1293 )
1294 coreconfigitem(
1294 coreconfigitem(
1295 b'format',
1295 b'format',
1296 b'bookmarks-in-store',
1296 b'bookmarks-in-store',
1297 default=False,
1297 default=False,
1298 )
1298 )
1299 coreconfigitem(
1299 coreconfigitem(
1300 b'format',
1300 b'format',
1301 b'chunkcachesize',
1301 b'chunkcachesize',
1302 default=None,
1302 default=None,
1303 experimental=True,
1303 experimental=True,
1304 )
1304 )
1305 coreconfigitem(
1305 coreconfigitem(
1306 # Enable this dirstate format *when creating a new repository*.
1306 # Enable this dirstate format *when creating a new repository*.
1307 # Which format to use for existing repos is controlled by .hg/requires
1307 # Which format to use for existing repos is controlled by .hg/requires
1308 b'format',
1308 b'format',
1309 b'exp-dirstate-v2',
1309 b'exp-dirstate-v2',
1310 default=False,
1310 default=False,
1311 experimental=True,
1311 experimental=True,
1312 )
1312 )
1313 coreconfigitem(
1313 coreconfigitem(
1314 b'format',
1314 b'format',
1315 b'dotencode',
1315 b'dotencode',
1316 default=True,
1316 default=True,
1317 )
1317 )
1318 coreconfigitem(
1318 coreconfigitem(
1319 b'format',
1319 b'format',
1320 b'generaldelta',
1320 b'generaldelta',
1321 default=False,
1321 default=False,
1322 experimental=True,
1322 experimental=True,
1323 )
1323 )
1324 coreconfigitem(
1324 coreconfigitem(
1325 b'format',
1325 b'format',
1326 b'manifestcachesize',
1326 b'manifestcachesize',
1327 default=None,
1327 default=None,
1328 experimental=True,
1328 experimental=True,
1329 )
1329 )
1330 coreconfigitem(
1330 coreconfigitem(
1331 b'format',
1331 b'format',
1332 b'maxchainlen',
1332 b'maxchainlen',
1333 default=dynamicdefault,
1333 default=dynamicdefault,
1334 experimental=True,
1334 experimental=True,
1335 )
1335 )
1336 coreconfigitem(
1336 coreconfigitem(
1337 b'format',
1337 b'format',
1338 b'obsstore-version',
1338 b'obsstore-version',
1339 default=None,
1339 default=None,
1340 )
1340 )
1341 coreconfigitem(
1341 coreconfigitem(
1342 b'format',
1342 b'format',
1343 b'sparse-revlog',
1343 b'sparse-revlog',
1344 default=True,
1344 default=True,
1345 )
1345 )
1346 coreconfigitem(
1346 coreconfigitem(
1347 b'format',
1347 b'format',
1348 b'revlog-compression',
1348 b'revlog-compression',
1349 default=lambda: [b'zstd', b'zlib'],
1349 default=lambda: [b'zstd', b'zlib'],
1350 alias=[(b'experimental', b'format.compression')],
1350 alias=[(b'experimental', b'format.compression')],
1351 )
1351 )
1352 # Experimental TODOs:
1352 # Experimental TODOs:
1353 #
1353 #
1354 # * Same as for revlogv2 (but for the reduction of the number of files)
1354 # * Same as for revlogv2 (but for the reduction of the number of files)
1355 # * Improvement to investigate
1355 # * Improvement to investigate
1356 # - storing .hgtags fnode
1356 # - storing .hgtags fnode
1357 # - storing `rank` of changesets
1357 # - storing `rank` of changesets
1358 # - storing branch related identifier
1358 # - storing branch related identifier
1359
1359
1360 coreconfigitem(
1360 coreconfigitem(
1361 b'format',
1361 b'format',
1362 b'exp-use-changelog-v2',
1362 b'exp-use-changelog-v2',
1363 default=None,
1363 default=None,
1364 experimental=True,
1364 experimental=True,
1365 )
1365 )
1366 coreconfigitem(
1366 coreconfigitem(
1367 b'format',
1367 b'format',
1368 b'usefncache',
1368 b'usefncache',
1369 default=True,
1369 default=True,
1370 )
1370 )
1371 coreconfigitem(
1371 coreconfigitem(
1372 b'format',
1372 b'format',
1373 b'usegeneraldelta',
1373 b'usegeneraldelta',
1374 default=True,
1374 default=True,
1375 )
1375 )
1376 coreconfigitem(
1376 coreconfigitem(
1377 b'format',
1377 b'format',
1378 b'usestore',
1378 b'usestore',
1379 default=True,
1379 default=True,
1380 )
1380 )
1381
1381
1382
1382
def _persistent_nodemap_default():
    """Compute the default value for `format.use-persistent-nodemap`.

    The persistent nodemap is only enabled by default when a fast
    implementation is available, i.e. when the Rust `revlog` module can
    be imported.
    """
    # Imported lazily to avoid a module-level import cycle with `policy`.
    from . import policy

    rust_revlog = policy.importrust('revlog')
    return rust_revlog is not None
1391
1391
1392
1392
1393 coreconfigitem(
1393 coreconfigitem(
1394 b'format',
1394 b'format',
1395 b'use-persistent-nodemap',
1395 b'use-persistent-nodemap',
1396 default=_persistent_nodemap_default,
1396 default=_persistent_nodemap_default,
1397 )
1397 )
1398 coreconfigitem(
1398 coreconfigitem(
1399 b'format',
1399 b'format',
1400 b'exp-use-copies-side-data-changeset',
1400 b'exp-use-copies-side-data-changeset',
1401 default=False,
1401 default=False,
1402 experimental=True,
1402 experimental=True,
1403 )
1403 )
1404 coreconfigitem(
1404 coreconfigitem(
1405 b'format',
1405 b'format',
1406 b'use-share-safe',
1406 b'use-share-safe',
1407 default=False,
1407 default=False,
1408 )
1408 )
1409 coreconfigitem(
1409 coreconfigitem(
1410 b'format',
1410 b'format',
1411 b'internal-phase',
1411 b'internal-phase',
1412 default=False,
1412 default=False,
1413 experimental=True,
1413 experimental=True,
1414 )
1414 )
1415 coreconfigitem(
1415 coreconfigitem(
1416 b'fsmonitor',
1416 b'fsmonitor',
1417 b'warn_when_unused',
1417 b'warn_when_unused',
1418 default=True,
1418 default=True,
1419 )
1419 )
1420 coreconfigitem(
1420 coreconfigitem(
1421 b'fsmonitor',
1421 b'fsmonitor',
1422 b'warn_update_file_count',
1422 b'warn_update_file_count',
1423 default=50000,
1423 default=50000,
1424 )
1424 )
1425 coreconfigitem(
1425 coreconfigitem(
1426 b'fsmonitor',
1426 b'fsmonitor',
1427 b'warn_update_file_count_rust',
1427 b'warn_update_file_count_rust',
1428 default=400000,
1428 default=400000,
1429 )
1429 )
1430 coreconfigitem(
1430 coreconfigitem(
1431 b'help',
1431 b'help',
1432 br'hidden-command\..*',
1432 br'hidden-command\..*',
1433 default=False,
1433 default=False,
1434 generic=True,
1434 generic=True,
1435 )
1435 )
1436 coreconfigitem(
1436 coreconfigitem(
1437 b'help',
1437 b'help',
1438 br'hidden-topic\..*',
1438 br'hidden-topic\..*',
1439 default=False,
1439 default=False,
1440 generic=True,
1440 generic=True,
1441 )
1441 )
1442 coreconfigitem(
1442 coreconfigitem(
1443 b'hooks',
1443 b'hooks',
1444 b'[^:]*',
1444 b'[^:]*',
1445 default=dynamicdefault,
1445 default=dynamicdefault,
1446 generic=True,
1446 generic=True,
1447 )
1447 )
1448 coreconfigitem(
1448 coreconfigitem(
1449 b'hooks',
1449 b'hooks',
1450 b'.*:run-with-plain',
1450 b'.*:run-with-plain',
1451 default=True,
1451 default=True,
1452 generic=True,
1452 generic=True,
1453 )
1453 )
1454 coreconfigitem(
1454 coreconfigitem(
1455 b'hgweb-paths',
1455 b'hgweb-paths',
1456 b'.*',
1456 b'.*',
1457 default=list,
1457 default=list,
1458 generic=True,
1458 generic=True,
1459 )
1459 )
1460 coreconfigitem(
1460 coreconfigitem(
1461 b'hostfingerprints',
1461 b'hostfingerprints',
1462 b'.*',
1462 b'.*',
1463 default=list,
1463 default=list,
1464 generic=True,
1464 generic=True,
1465 )
1465 )
1466 coreconfigitem(
1466 coreconfigitem(
1467 b'hostsecurity',
1467 b'hostsecurity',
1468 b'ciphers',
1468 b'ciphers',
1469 default=None,
1469 default=None,
1470 )
1470 )
1471 coreconfigitem(
1471 coreconfigitem(
1472 b'hostsecurity',
1472 b'hostsecurity',
1473 b'minimumprotocol',
1473 b'minimumprotocol',
1474 default=dynamicdefault,
1474 default=dynamicdefault,
1475 )
1475 )
1476 coreconfigitem(
1476 coreconfigitem(
1477 b'hostsecurity',
1477 b'hostsecurity',
1478 b'.*:minimumprotocol$',
1478 b'.*:minimumprotocol$',
1479 default=dynamicdefault,
1479 default=dynamicdefault,
1480 generic=True,
1480 generic=True,
1481 )
1481 )
1482 coreconfigitem(
1482 coreconfigitem(
1483 b'hostsecurity',
1483 b'hostsecurity',
1484 b'.*:ciphers$',
1484 b'.*:ciphers$',
1485 default=dynamicdefault,
1485 default=dynamicdefault,
1486 generic=True,
1486 generic=True,
1487 )
1487 )
1488 coreconfigitem(
1488 coreconfigitem(
1489 b'hostsecurity',
1489 b'hostsecurity',
1490 b'.*:fingerprints$',
1490 b'.*:fingerprints$',
1491 default=list,
1491 default=list,
1492 generic=True,
1492 generic=True,
1493 )
1493 )
1494 coreconfigitem(
1494 coreconfigitem(
1495 b'hostsecurity',
1495 b'hostsecurity',
1496 b'.*:verifycertsfile$',
1496 b'.*:verifycertsfile$',
1497 default=None,
1497 default=None,
1498 generic=True,
1498 generic=True,
1499 )
1499 )
1500
1500
1501 coreconfigitem(
1501 coreconfigitem(
1502 b'http_proxy',
1502 b'http_proxy',
1503 b'always',
1503 b'always',
1504 default=False,
1504 default=False,
1505 )
1505 )
1506 coreconfigitem(
1506 coreconfigitem(
1507 b'http_proxy',
1507 b'http_proxy',
1508 b'host',
1508 b'host',
1509 default=None,
1509 default=None,
1510 )
1510 )
1511 coreconfigitem(
1511 coreconfigitem(
1512 b'http_proxy',
1512 b'http_proxy',
1513 b'no',
1513 b'no',
1514 default=list,
1514 default=list,
1515 )
1515 )
1516 coreconfigitem(
1516 coreconfigitem(
1517 b'http_proxy',
1517 b'http_proxy',
1518 b'passwd',
1518 b'passwd',
1519 default=None,
1519 default=None,
1520 )
1520 )
1521 coreconfigitem(
1521 coreconfigitem(
1522 b'http_proxy',
1522 b'http_proxy',
1523 b'user',
1523 b'user',
1524 default=None,
1524 default=None,
1525 )
1525 )
1526
1526
1527 coreconfigitem(
1527 coreconfigitem(
1528 b'http',
1528 b'http',
1529 b'timeout',
1529 b'timeout',
1530 default=None,
1530 default=None,
1531 )
1531 )
1532
1532
1533 coreconfigitem(
1533 coreconfigitem(
1534 b'logtoprocess',
1534 b'logtoprocess',
1535 b'commandexception',
1535 b'commandexception',
1536 default=None,
1536 default=None,
1537 )
1537 )
1538 coreconfigitem(
1538 coreconfigitem(
1539 b'logtoprocess',
1539 b'logtoprocess',
1540 b'commandfinish',
1540 b'commandfinish',
1541 default=None,
1541 default=None,
1542 )
1542 )
1543 coreconfigitem(
1543 coreconfigitem(
1544 b'logtoprocess',
1544 b'logtoprocess',
1545 b'command',
1545 b'command',
1546 default=None,
1546 default=None,
1547 )
1547 )
1548 coreconfigitem(
1548 coreconfigitem(
1549 b'logtoprocess',
1549 b'logtoprocess',
1550 b'develwarn',
1550 b'develwarn',
1551 default=None,
1551 default=None,
1552 )
1552 )
1553 coreconfigitem(
1553 coreconfigitem(
1554 b'logtoprocess',
1554 b'logtoprocess',
1555 b'uiblocked',
1555 b'uiblocked',
1556 default=None,
1556 default=None,
1557 )
1557 )
1558 coreconfigitem(
1558 coreconfigitem(
1559 b'merge',
1559 b'merge',
1560 b'checkunknown',
1560 b'checkunknown',
1561 default=b'abort',
1561 default=b'abort',
1562 )
1562 )
1563 coreconfigitem(
1563 coreconfigitem(
1564 b'merge',
1564 b'merge',
1565 b'checkignored',
1565 b'checkignored',
1566 default=b'abort',
1566 default=b'abort',
1567 )
1567 )
1568 coreconfigitem(
1568 coreconfigitem(
1569 b'experimental',
1569 b'experimental',
1570 b'merge.checkpathconflicts',
1570 b'merge.checkpathconflicts',
1571 default=False,
1571 default=False,
1572 )
1572 )
1573 coreconfigitem(
1573 coreconfigitem(
1574 b'merge',
1574 b'merge',
1575 b'followcopies',
1575 b'followcopies',
1576 default=True,
1576 default=True,
1577 )
1577 )
1578 coreconfigitem(
1578 coreconfigitem(
1579 b'merge',
1579 b'merge',
1580 b'on-failure',
1580 b'on-failure',
1581 default=b'continue',
1581 default=b'continue',
1582 )
1582 )
1583 coreconfigitem(
1583 coreconfigitem(
1584 b'merge',
1584 b'merge',
1585 b'preferancestor',
1585 b'preferancestor',
1586 default=lambda: [b'*'],
1586 default=lambda: [b'*'],
1587 experimental=True,
1587 experimental=True,
1588 )
1588 )
1589 coreconfigitem(
1589 coreconfigitem(
1590 b'merge',
1590 b'merge',
1591 b'strict-capability-check',
1591 b'strict-capability-check',
1592 default=False,
1592 default=False,
1593 )
1593 )
1594 coreconfigitem(
1594 coreconfigitem(
1595 b'merge-tools',
1595 b'merge-tools',
1596 b'.*',
1596 b'.*',
1597 default=None,
1597 default=None,
1598 generic=True,
1598 generic=True,
1599 )
1599 )
1600 coreconfigitem(
1600 coreconfigitem(
1601 b'merge-tools',
1601 b'merge-tools',
1602 br'.*\.args$',
1602 br'.*\.args$',
1603 default=b"$local $base $other",
1603 default=b"$local $base $other",
1604 generic=True,
1604 generic=True,
1605 priority=-1,
1605 priority=-1,
1606 )
1606 )
1607 coreconfigitem(
1607 coreconfigitem(
1608 b'merge-tools',
1608 b'merge-tools',
1609 br'.*\.binary$',
1609 br'.*\.binary$',
1610 default=False,
1610 default=False,
1611 generic=True,
1611 generic=True,
1612 priority=-1,
1612 priority=-1,
1613 )
1613 )
1614 coreconfigitem(
1614 coreconfigitem(
1615 b'merge-tools',
1615 b'merge-tools',
1616 br'.*\.check$',
1616 br'.*\.check$',
1617 default=list,
1617 default=list,
1618 generic=True,
1618 generic=True,
1619 priority=-1,
1619 priority=-1,
1620 )
1620 )
1621 coreconfigitem(
1621 coreconfigitem(
1622 b'merge-tools',
1622 b'merge-tools',
1623 br'.*\.checkchanged$',
1623 br'.*\.checkchanged$',
1624 default=False,
1624 default=False,
1625 generic=True,
1625 generic=True,
1626 priority=-1,
1626 priority=-1,
1627 )
1627 )
1628 coreconfigitem(
1628 coreconfigitem(
1629 b'merge-tools',
1629 b'merge-tools',
1630 br'.*\.executable$',
1630 br'.*\.executable$',
1631 default=dynamicdefault,
1631 default=dynamicdefault,
1632 generic=True,
1632 generic=True,
1633 priority=-1,
1633 priority=-1,
1634 )
1634 )
1635 coreconfigitem(
1635 coreconfigitem(
1636 b'merge-tools',
1636 b'merge-tools',
1637 br'.*\.fixeol$',
1637 br'.*\.fixeol$',
1638 default=False,
1638 default=False,
1639 generic=True,
1639 generic=True,
1640 priority=-1,
1640 priority=-1,
1641 )
1641 )
1642 coreconfigitem(
1642 coreconfigitem(
1643 b'merge-tools',
1643 b'merge-tools',
1644 br'.*\.gui$',
1644 br'.*\.gui$',
1645 default=False,
1645 default=False,
1646 generic=True,
1646 generic=True,
1647 priority=-1,
1647 priority=-1,
1648 )
1648 )
1649 coreconfigitem(
1649 coreconfigitem(
1650 b'merge-tools',
1650 b'merge-tools',
1651 br'.*\.mergemarkers$',
1651 br'.*\.mergemarkers$',
1652 default=b'basic',
1652 default=b'basic',
1653 generic=True,
1653 generic=True,
1654 priority=-1,
1654 priority=-1,
1655 )
1655 )
1656 coreconfigitem(
1656 coreconfigitem(
1657 b'merge-tools',
1657 b'merge-tools',
1658 br'.*\.mergemarkertemplate$',
1658 br'.*\.mergemarkertemplate$',
1659 default=dynamicdefault, # take from command-templates.mergemarker
1659 default=dynamicdefault, # take from command-templates.mergemarker
1660 generic=True,
1660 generic=True,
1661 priority=-1,
1661 priority=-1,
1662 )
1662 )
1663 coreconfigitem(
1663 coreconfigitem(
1664 b'merge-tools',
1664 b'merge-tools',
1665 br'.*\.priority$',
1665 br'.*\.priority$',
1666 default=0,
1666 default=0,
1667 generic=True,
1667 generic=True,
1668 priority=-1,
1668 priority=-1,
1669 )
1669 )
1670 coreconfigitem(
1670 coreconfigitem(
1671 b'merge-tools',
1671 b'merge-tools',
1672 br'.*\.premerge$',
1672 br'.*\.premerge$',
1673 default=dynamicdefault,
1673 default=dynamicdefault,
1674 generic=True,
1674 generic=True,
1675 priority=-1,
1675 priority=-1,
1676 )
1676 )
1677 coreconfigitem(
1677 coreconfigitem(
1678 b'merge-tools',
1678 b'merge-tools',
1679 br'.*\.symlink$',
1679 br'.*\.symlink$',
1680 default=False,
1680 default=False,
1681 generic=True,
1681 generic=True,
1682 priority=-1,
1682 priority=-1,
1683 )
1683 )
1684 coreconfigitem(
1684 coreconfigitem(
1685 b'pager',
1685 b'pager',
1686 b'attend-.*',
1686 b'attend-.*',
1687 default=dynamicdefault,
1687 default=dynamicdefault,
1688 generic=True,
1688 generic=True,
1689 )
1689 )
1690 coreconfigitem(
1690 coreconfigitem(
1691 b'pager',
1691 b'pager',
1692 b'ignore',
1692 b'ignore',
1693 default=list,
1693 default=list,
1694 )
1694 )
1695 coreconfigitem(
1695 coreconfigitem(
1696 b'pager',
1696 b'pager',
1697 b'pager',
1697 b'pager',
1698 default=dynamicdefault,
1698 default=dynamicdefault,
1699 )
1699 )
1700 coreconfigitem(
1700 coreconfigitem(
1701 b'patch',
1701 b'patch',
1702 b'eol',
1702 b'eol',
1703 default=b'strict',
1703 default=b'strict',
1704 )
1704 )
1705 coreconfigitem(
1705 coreconfigitem(
1706 b'patch',
1706 b'patch',
1707 b'fuzz',
1707 b'fuzz',
1708 default=2,
1708 default=2,
1709 )
1709 )
1710 coreconfigitem(
1710 coreconfigitem(
1711 b'paths',
1711 b'paths',
1712 b'default',
1712 b'default',
1713 default=None,
1713 default=None,
1714 )
1714 )
1715 coreconfigitem(
1715 coreconfigitem(
1716 b'paths',
1716 b'paths',
1717 b'default-push',
1717 b'default-push',
1718 default=None,
1718 default=None,
1719 )
1719 )
1720 coreconfigitem(
1720 coreconfigitem(
1721 b'paths',
1721 b'paths',
1722 b'.*',
1722 b'.*',
1723 default=None,
1723 default=None,
1724 generic=True,
1724 generic=True,
1725 )
1725 )
1726 coreconfigitem(
1726 coreconfigitem(
1727 b'phases',
1727 b'phases',
1728 b'checksubrepos',
1728 b'checksubrepos',
1729 default=b'follow',
1729 default=b'follow',
1730 )
1730 )
1731 coreconfigitem(
1731 coreconfigitem(
1732 b'phases',
1732 b'phases',
1733 b'new-commit',
1733 b'new-commit',
1734 default=b'draft',
1734 default=b'draft',
1735 )
1735 )
1736 coreconfigitem(
1736 coreconfigitem(
1737 b'phases',
1737 b'phases',
1738 b'publish',
1738 b'publish',
1739 default=True,
1739 default=True,
1740 )
1740 )
1741 coreconfigitem(
1741 coreconfigitem(
1742 b'profiling',
1742 b'profiling',
1743 b'enabled',
1743 b'enabled',
1744 default=False,
1744 default=False,
1745 )
1745 )
1746 coreconfigitem(
1746 coreconfigitem(
1747 b'profiling',
1747 b'profiling',
1748 b'format',
1748 b'format',
1749 default=b'text',
1749 default=b'text',
1750 )
1750 )
1751 coreconfigitem(
1751 coreconfigitem(
1752 b'profiling',
1752 b'profiling',
1753 b'freq',
1753 b'freq',
1754 default=1000,
1754 default=1000,
1755 )
1755 )
1756 coreconfigitem(
1756 coreconfigitem(
1757 b'profiling',
1757 b'profiling',
1758 b'limit',
1758 b'limit',
1759 default=30,
1759 default=30,
1760 )
1760 )
1761 coreconfigitem(
1761 coreconfigitem(
1762 b'profiling',
1762 b'profiling',
1763 b'nested',
1763 b'nested',
1764 default=0,
1764 default=0,
1765 )
1765 )
1766 coreconfigitem(
1766 coreconfigitem(
1767 b'profiling',
1767 b'profiling',
1768 b'output',
1768 b'output',
1769 default=None,
1769 default=None,
1770 )
1770 )
1771 coreconfigitem(
1771 coreconfigitem(
1772 b'profiling',
1772 b'profiling',
1773 b'showmax',
1773 b'showmax',
1774 default=0.999,
1774 default=0.999,
1775 )
1775 )
1776 coreconfigitem(
1776 coreconfigitem(
1777 b'profiling',
1777 b'profiling',
1778 b'showmin',
1778 b'showmin',
1779 default=dynamicdefault,
1779 default=dynamicdefault,
1780 )
1780 )
1781 coreconfigitem(
1781 coreconfigitem(
1782 b'profiling',
1782 b'profiling',
1783 b'showtime',
1783 b'showtime',
1784 default=True,
1784 default=True,
1785 )
1785 )
1786 coreconfigitem(
1786 coreconfigitem(
1787 b'profiling',
1787 b'profiling',
1788 b'sort',
1788 b'sort',
1789 default=b'inlinetime',
1789 default=b'inlinetime',
1790 )
1790 )
1791 coreconfigitem(
1791 coreconfigitem(
1792 b'profiling',
1792 b'profiling',
1793 b'statformat',
1793 b'statformat',
1794 default=b'hotpath',
1794 default=b'hotpath',
1795 )
1795 )
1796 coreconfigitem(
1796 coreconfigitem(
1797 b'profiling',
1797 b'profiling',
1798 b'time-track',
1798 b'time-track',
1799 default=dynamicdefault,
1799 default=dynamicdefault,
1800 )
1800 )
1801 coreconfigitem(
1801 coreconfigitem(
1802 b'profiling',
1802 b'profiling',
1803 b'type',
1803 b'type',
1804 default=b'stat',
1804 default=b'stat',
1805 )
1805 )
1806 coreconfigitem(
1806 coreconfigitem(
1807 b'progress',
1807 b'progress',
1808 b'assume-tty',
1808 b'assume-tty',
1809 default=False,
1809 default=False,
1810 )
1810 )
1811 coreconfigitem(
1811 coreconfigitem(
1812 b'progress',
1812 b'progress',
1813 b'changedelay',
1813 b'changedelay',
1814 default=1,
1814 default=1,
1815 )
1815 )
1816 coreconfigitem(
1816 coreconfigitem(
1817 b'progress',
1817 b'progress',
1818 b'clear-complete',
1818 b'clear-complete',
1819 default=True,
1819 default=True,
1820 )
1820 )
1821 coreconfigitem(
1821 coreconfigitem(
1822 b'progress',
1822 b'progress',
1823 b'debug',
1823 b'debug',
1824 default=False,
1824 default=False,
1825 )
1825 )
1826 coreconfigitem(
1826 coreconfigitem(
1827 b'progress',
1827 b'progress',
1828 b'delay',
1828 b'delay',
1829 default=3,
1829 default=3,
1830 )
1830 )
1831 coreconfigitem(
1831 coreconfigitem(
1832 b'progress',
1832 b'progress',
1833 b'disable',
1833 b'disable',
1834 default=False,
1834 default=False,
1835 )
1835 )
1836 coreconfigitem(
1836 coreconfigitem(
1837 b'progress',
1837 b'progress',
1838 b'estimateinterval',
1838 b'estimateinterval',
1839 default=60.0,
1839 default=60.0,
1840 )
1840 )
1841 coreconfigitem(
1841 coreconfigitem(
1842 b'progress',
1842 b'progress',
1843 b'format',
1843 b'format',
1844 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1844 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1845 )
1845 )
1846 coreconfigitem(
1846 coreconfigitem(
1847 b'progress',
1847 b'progress',
1848 b'refresh',
1848 b'refresh',
1849 default=0.1,
1849 default=0.1,
1850 )
1850 )
1851 coreconfigitem(
1851 coreconfigitem(
1852 b'progress',
1852 b'progress',
1853 b'width',
1853 b'width',
1854 default=dynamicdefault,
1854 default=dynamicdefault,
1855 )
1855 )
1856 coreconfigitem(
1856 coreconfigitem(
1857 b'pull',
1857 b'pull',
1858 b'confirm',
1858 b'confirm',
1859 default=False,
1859 default=False,
1860 )
1860 )
1861 coreconfigitem(
1861 coreconfigitem(
1862 b'push',
1862 b'push',
1863 b'pushvars.server',
1863 b'pushvars.server',
1864 default=False,
1864 default=False,
1865 )
1865 )
1866 coreconfigitem(
1866 coreconfigitem(
1867 b'rewrite',
1867 b'rewrite',
1868 b'backup-bundle',
1868 b'backup-bundle',
1869 default=True,
1869 default=True,
1870 alias=[(b'ui', b'history-editing-backup')],
1870 alias=[(b'ui', b'history-editing-backup')],
1871 )
1871 )
1872 coreconfigitem(
1872 coreconfigitem(
1873 b'rewrite',
1873 b'rewrite',
1874 b'update-timestamp',
1874 b'update-timestamp',
1875 default=False,
1875 default=False,
1876 )
1876 )
1877 coreconfigitem(
1877 coreconfigitem(
1878 b'rewrite',
1878 b'rewrite',
1879 b'empty-successor',
1879 b'empty-successor',
1880 default=b'skip',
1880 default=b'skip',
1881 experimental=True,
1881 experimental=True,
1882 )
1882 )
1883 coreconfigitem(
1883 coreconfigitem(
1884 b'storage',
1884 b'storage',
1885 b'new-repo-backend',
1885 b'new-repo-backend',
1886 default=b'revlogv1',
1886 default=b'revlogv1',
1887 experimental=True,
1887 experimental=True,
1888 )
1888 )
1889 coreconfigitem(
1889 coreconfigitem(
1890 b'storage',
1890 b'storage',
1891 b'revlog.optimize-delta-parent-choice',
1891 b'revlog.optimize-delta-parent-choice',
1892 default=True,
1892 default=True,
1893 alias=[(b'format', b'aggressivemergedeltas')],
1893 alias=[(b'format', b'aggressivemergedeltas')],
1894 )
1894 )
1895 # experimental as long as rust is experimental (or a C version is implemented)
1895 # experimental as long as rust is experimental (or a C version is implemented)
1896 coreconfigitem(
1896 coreconfigitem(
1897 b'storage',
1897 b'storage',
1898 b'revlog.persistent-nodemap.mmap',
1898 b'revlog.persistent-nodemap.mmap',
1899 default=True,
1899 default=True,
1900 )
1900 )
1901 # experimental as long as format.use-persistent-nodemap is.
1901 # experimental as long as format.use-persistent-nodemap is.
1902 coreconfigitem(
1902 coreconfigitem(
1903 b'storage',
1903 b'storage',
1904 b'revlog.persistent-nodemap.slow-path',
1904 b'revlog.persistent-nodemap.slow-path',
1905 default=b"abort",
1905 default=b"abort",
1906 )
1906 )
1907
1907
1908 coreconfigitem(
1908 coreconfigitem(
1909 b'storage',
1909 b'storage',
1910 b'revlog.reuse-external-delta',
1910 b'revlog.reuse-external-delta',
1911 default=True,
1911 default=True,
1912 )
1912 )
1913 coreconfigitem(
1913 coreconfigitem(
1914 b'storage',
1914 b'storage',
1915 b'revlog.reuse-external-delta-parent',
1915 b'revlog.reuse-external-delta-parent',
1916 default=None,
1916 default=None,
1917 )
1917 )
1918 coreconfigitem(
1918 coreconfigitem(
1919 b'storage',
1919 b'storage',
1920 b'revlog.zlib.level',
1920 b'revlog.zlib.level',
1921 default=None,
1921 default=None,
1922 )
1922 )
1923 coreconfigitem(
1923 coreconfigitem(
1924 b'storage',
1924 b'storage',
1925 b'revlog.zstd.level',
1925 b'revlog.zstd.level',
1926 default=None,
1926 default=None,
1927 )
1927 )
1928 coreconfigitem(
1928 coreconfigitem(
1929 b'server',
1929 b'server',
1930 b'bookmarks-pushkey-compat',
1930 b'bookmarks-pushkey-compat',
1931 default=True,
1931 default=True,
1932 )
1932 )
1933 coreconfigitem(
1933 coreconfigitem(
1934 b'server',
1934 b'server',
1935 b'bundle1',
1935 b'bundle1',
1936 default=True,
1936 default=True,
1937 )
1937 )
1938 coreconfigitem(
1938 coreconfigitem(
1939 b'server',
1939 b'server',
1940 b'bundle1gd',
1940 b'bundle1gd',
1941 default=None,
1941 default=None,
1942 )
1942 )
1943 coreconfigitem(
1943 coreconfigitem(
1944 b'server',
1944 b'server',
1945 b'bundle1.pull',
1945 b'bundle1.pull',
1946 default=None,
1946 default=None,
1947 )
1947 )
1948 coreconfigitem(
1948 coreconfigitem(
1949 b'server',
1949 b'server',
1950 b'bundle1gd.pull',
1950 b'bundle1gd.pull',
1951 default=None,
1951 default=None,
1952 )
1952 )
1953 coreconfigitem(
1953 coreconfigitem(
1954 b'server',
1954 b'server',
1955 b'bundle1.push',
1955 b'bundle1.push',
1956 default=None,
1956 default=None,
1957 )
1957 )
1958 coreconfigitem(
1958 coreconfigitem(
1959 b'server',
1959 b'server',
1960 b'bundle1gd.push',
1960 b'bundle1gd.push',
1961 default=None,
1961 default=None,
1962 )
1962 )
1963 coreconfigitem(
1963 coreconfigitem(
1964 b'server',
1964 b'server',
1965 b'bundle2.stream',
1965 b'bundle2.stream',
1966 default=True,
1966 default=True,
1967 alias=[(b'experimental', b'bundle2.stream')],
1967 alias=[(b'experimental', b'bundle2.stream')],
1968 )
1968 )
1969 coreconfigitem(
1969 coreconfigitem(
1970 b'server',
1970 b'server',
1971 b'compressionengines',
1971 b'compressionengines',
1972 default=list,
1972 default=list,
1973 )
1973 )
1974 coreconfigitem(
1974 coreconfigitem(
1975 b'server',
1975 b'server',
1976 b'concurrent-push-mode',
1976 b'concurrent-push-mode',
1977 default=b'check-related',
1977 default=b'check-related',
1978 )
1978 )
1979 coreconfigitem(
1979 coreconfigitem(
1980 b'server',
1980 b'server',
1981 b'disablefullbundle',
1981 b'disablefullbundle',
1982 default=False,
1982 default=False,
1983 )
1983 )
1984 coreconfigitem(
1984 coreconfigitem(
1985 b'server',
1985 b'server',
1986 b'maxhttpheaderlen',
1986 b'maxhttpheaderlen',
1987 default=1024,
1987 default=1024,
1988 )
1988 )
1989 coreconfigitem(
1989 coreconfigitem(
1990 b'server',
1990 b'server',
1991 b'pullbundle',
1991 b'pullbundle',
1992 default=False,
1992 default=False,
1993 )
1993 )
1994 coreconfigitem(
1994 coreconfigitem(
1995 b'server',
1995 b'server',
1996 b'preferuncompressed',
1996 b'preferuncompressed',
1997 default=False,
1997 default=False,
1998 )
1998 )
1999 coreconfigitem(
1999 coreconfigitem(
2000 b'server',
2000 b'server',
2001 b'streamunbundle',
2001 b'streamunbundle',
2002 default=False,
2002 default=False,
2003 )
2003 )
2004 coreconfigitem(
2004 coreconfigitem(
2005 b'server',
2005 b'server',
2006 b'uncompressed',
2006 b'uncompressed',
2007 default=True,
2007 default=True,
2008 )
2008 )
2009 coreconfigitem(
2009 coreconfigitem(
2010 b'server',
2010 b'server',
2011 b'uncompressedallowsecret',
2011 b'uncompressedallowsecret',
2012 default=False,
2012 default=False,
2013 )
2013 )
2014 coreconfigitem(
2014 coreconfigitem(
2015 b'server',
2015 b'server',
2016 b'view',
2016 b'view',
2017 default=b'served',
2017 default=b'served',
2018 )
2018 )
2019 coreconfigitem(
2019 coreconfigitem(
2020 b'server',
2020 b'server',
2021 b'validate',
2021 b'validate',
2022 default=False,
2022 default=False,
2023 )
2023 )
2024 coreconfigitem(
2024 coreconfigitem(
2025 b'server',
2025 b'server',
2026 b'zliblevel',
2026 b'zliblevel',
2027 default=-1,
2027 default=-1,
2028 )
2028 )
2029 coreconfigitem(
2029 coreconfigitem(
2030 b'server',
2030 b'server',
2031 b'zstdlevel',
2031 b'zstdlevel',
2032 default=3,
2032 default=3,
2033 )
2033 )
2034 coreconfigitem(
2034 coreconfigitem(
2035 b'share',
2035 b'share',
2036 b'pool',
2036 b'pool',
2037 default=None,
2037 default=None,
2038 )
2038 )
2039 coreconfigitem(
2039 coreconfigitem(
2040 b'share',
2040 b'share',
2041 b'poolnaming',
2041 b'poolnaming',
2042 default=b'identity',
2042 default=b'identity',
2043 )
2043 )
2044 coreconfigitem(
2044 coreconfigitem(
2045 b'share',
2045 b'share',
2046 b'safe-mismatch.source-not-safe',
2046 b'safe-mismatch.source-not-safe',
2047 default=b'abort',
2047 default=b'abort',
2048 )
2048 )
2049 coreconfigitem(
2049 coreconfigitem(
2050 b'share',
2050 b'share',
2051 b'safe-mismatch.source-safe',
2051 b'safe-mismatch.source-safe',
2052 default=b'abort',
2052 default=b'abort',
2053 )
2053 )
2054 coreconfigitem(
2054 coreconfigitem(
2055 b'share',
2055 b'share',
2056 b'safe-mismatch.source-not-safe.warn',
2056 b'safe-mismatch.source-not-safe.warn',
2057 default=True,
2057 default=True,
2058 )
2058 )
2059 coreconfigitem(
2059 coreconfigitem(
2060 b'share',
2060 b'share',
2061 b'safe-mismatch.source-safe.warn',
2061 b'safe-mismatch.source-safe.warn',
2062 default=True,
2062 default=True,
2063 )
2063 )
2064 coreconfigitem(
2064 coreconfigitem(
2065 b'shelve',
2065 b'shelve',
2066 b'maxbackups',
2066 b'maxbackups',
2067 default=10,
2067 default=10,
2068 )
2068 )
2069 coreconfigitem(
2069 coreconfigitem(
2070 b'smtp',
2070 b'smtp',
2071 b'host',
2071 b'host',
2072 default=None,
2072 default=None,
2073 )
2073 )
2074 coreconfigitem(
2074 coreconfigitem(
2075 b'smtp',
2075 b'smtp',
2076 b'local_hostname',
2076 b'local_hostname',
2077 default=None,
2077 default=None,
2078 )
2078 )
2079 coreconfigitem(
2079 coreconfigitem(
2080 b'smtp',
2080 b'smtp',
2081 b'password',
2081 b'password',
2082 default=None,
2082 default=None,
2083 )
2083 )
2084 coreconfigitem(
2084 coreconfigitem(
2085 b'smtp',
2085 b'smtp',
2086 b'port',
2086 b'port',
2087 default=dynamicdefault,
2087 default=dynamicdefault,
2088 )
2088 )
2089 coreconfigitem(
2089 coreconfigitem(
2090 b'smtp',
2090 b'smtp',
2091 b'tls',
2091 b'tls',
2092 default=b'none',
2092 default=b'none',
2093 )
2093 )
2094 coreconfigitem(
2094 coreconfigitem(
2095 b'smtp',
2095 b'smtp',
2096 b'username',
2096 b'username',
2097 default=None,
2097 default=None,
2098 )
2098 )
2099 coreconfigitem(
2099 coreconfigitem(
2100 b'sparse',
2100 b'sparse',
2101 b'missingwarning',
2101 b'missingwarning',
2102 default=True,
2102 default=True,
2103 experimental=True,
2103 experimental=True,
2104 )
2104 )
2105 coreconfigitem(
2105 coreconfigitem(
2106 b'subrepos',
2106 b'subrepos',
2107 b'allowed',
2107 b'allowed',
2108 default=dynamicdefault, # to make backporting simpler
2108 default=dynamicdefault, # to make backporting simpler
2109 )
2109 )
2110 coreconfigitem(
2110 coreconfigitem(
2111 b'subrepos',
2111 b'subrepos',
2112 b'hg:allowed',
2112 b'hg:allowed',
2113 default=dynamicdefault,
2113 default=dynamicdefault,
2114 )
2114 )
2115 coreconfigitem(
2115 coreconfigitem(
2116 b'subrepos',
2116 b'subrepos',
2117 b'git:allowed',
2117 b'git:allowed',
2118 default=dynamicdefault,
2118 default=dynamicdefault,
2119 )
2119 )
2120 coreconfigitem(
2120 coreconfigitem(
2121 b'subrepos',
2121 b'subrepos',
2122 b'svn:allowed',
2122 b'svn:allowed',
2123 default=dynamicdefault,
2123 default=dynamicdefault,
2124 )
2124 )
2125 coreconfigitem(
2125 coreconfigitem(
2126 b'templates',
2126 b'templates',
2127 b'.*',
2127 b'.*',
2128 default=None,
2128 default=None,
2129 generic=True,
2129 generic=True,
2130 )
2130 )
2131 coreconfigitem(
2131 coreconfigitem(
2132 b'templateconfig',
2132 b'templateconfig',
2133 b'.*',
2133 b'.*',
2134 default=dynamicdefault,
2134 default=dynamicdefault,
2135 generic=True,
2135 generic=True,
2136 )
2136 )
2137 coreconfigitem(
2137 coreconfigitem(
2138 b'trusted',
2138 b'trusted',
2139 b'groups',
2139 b'groups',
2140 default=list,
2140 default=list,
2141 )
2141 )
2142 coreconfigitem(
2142 coreconfigitem(
2143 b'trusted',
2143 b'trusted',
2144 b'users',
2144 b'users',
2145 default=list,
2145 default=list,
2146 )
2146 )
2147 coreconfigitem(
2147 coreconfigitem(
2148 b'ui',
2148 b'ui',
2149 b'_usedassubrepo',
2149 b'_usedassubrepo',
2150 default=False,
2150 default=False,
2151 )
2151 )
2152 coreconfigitem(
2152 coreconfigitem(
2153 b'ui',
2153 b'ui',
2154 b'allowemptycommit',
2154 b'allowemptycommit',
2155 default=False,
2155 default=False,
2156 )
2156 )
2157 coreconfigitem(
2157 coreconfigitem(
2158 b'ui',
2158 b'ui',
2159 b'archivemeta',
2159 b'archivemeta',
2160 default=True,
2160 default=True,
2161 )
2161 )
2162 coreconfigitem(
2162 coreconfigitem(
2163 b'ui',
2163 b'ui',
2164 b'askusername',
2164 b'askusername',
2165 default=False,
2165 default=False,
2166 )
2166 )
2167 coreconfigitem(
2167 coreconfigitem(
2168 b'ui',
2168 b'ui',
2169 b'available-memory',
2169 b'available-memory',
2170 default=None,
2170 default=None,
2171 )
2171 )
2172
2172
2173 coreconfigitem(
2173 coreconfigitem(
2174 b'ui',
2174 b'ui',
2175 b'clonebundlefallback',
2175 b'clonebundlefallback',
2176 default=False,
2176 default=False,
2177 )
2177 )
2178 coreconfigitem(
2178 coreconfigitem(
2179 b'ui',
2179 b'ui',
2180 b'clonebundleprefers',
2180 b'clonebundleprefers',
2181 default=list,
2181 default=list,
2182 )
2182 )
2183 coreconfigitem(
2183 coreconfigitem(
2184 b'ui',
2184 b'ui',
2185 b'clonebundles',
2185 b'clonebundles',
2186 default=True,
2186 default=True,
2187 )
2187 )
2188 coreconfigitem(
2188 coreconfigitem(
2189 b'ui',
2189 b'ui',
2190 b'color',
2190 b'color',
2191 default=b'auto',
2191 default=b'auto',
2192 )
2192 )
2193 coreconfigitem(
2193 coreconfigitem(
2194 b'ui',
2194 b'ui',
2195 b'commitsubrepos',
2195 b'commitsubrepos',
2196 default=False,
2196 default=False,
2197 )
2197 )
2198 coreconfigitem(
2198 coreconfigitem(
2199 b'ui',
2199 b'ui',
2200 b'debug',
2200 b'debug',
2201 default=False,
2201 default=False,
2202 )
2202 )
2203 coreconfigitem(
2203 coreconfigitem(
2204 b'ui',
2204 b'ui',
2205 b'debugger',
2205 b'debugger',
2206 default=None,
2206 default=None,
2207 )
2207 )
2208 coreconfigitem(
2208 coreconfigitem(
2209 b'ui',
2209 b'ui',
2210 b'editor',
2210 b'editor',
2211 default=dynamicdefault,
2211 default=dynamicdefault,
2212 )
2212 )
2213 coreconfigitem(
2213 coreconfigitem(
2214 b'ui',
2214 b'ui',
2215 b'detailed-exit-code',
2215 b'detailed-exit-code',
2216 default=False,
2216 default=False,
2217 experimental=True,
2217 experimental=True,
2218 )
2218 )
2219 coreconfigitem(
2219 coreconfigitem(
2220 b'ui',
2220 b'ui',
2221 b'fallbackencoding',
2221 b'fallbackencoding',
2222 default=None,
2222 default=None,
2223 )
2223 )
2224 coreconfigitem(
2224 coreconfigitem(
2225 b'ui',
2225 b'ui',
2226 b'forcecwd',
2226 b'forcecwd',
2227 default=None,
2227 default=None,
2228 )
2228 )
2229 coreconfigitem(
2229 coreconfigitem(
2230 b'ui',
2230 b'ui',
2231 b'forcemerge',
2231 b'forcemerge',
2232 default=None,
2232 default=None,
2233 )
2233 )
2234 coreconfigitem(
2234 coreconfigitem(
2235 b'ui',
2235 b'ui',
2236 b'formatdebug',
2236 b'formatdebug',
2237 default=False,
2237 default=False,
2238 )
2238 )
2239 coreconfigitem(
2239 coreconfigitem(
2240 b'ui',
2240 b'ui',
2241 b'formatjson',
2241 b'formatjson',
2242 default=False,
2242 default=False,
2243 )
2243 )
2244 coreconfigitem(
2244 coreconfigitem(
2245 b'ui',
2245 b'ui',
2246 b'formatted',
2246 b'formatted',
2247 default=None,
2247 default=None,
2248 )
2248 )
2249 coreconfigitem(
2249 coreconfigitem(
2250 b'ui',
2250 b'ui',
2251 b'interactive',
2251 b'interactive',
2252 default=None,
2252 default=None,
2253 )
2253 )
2254 coreconfigitem(
2254 coreconfigitem(
2255 b'ui',
2255 b'ui',
2256 b'interface',
2256 b'interface',
2257 default=None,
2257 default=None,
2258 )
2258 )
2259 coreconfigitem(
2259 coreconfigitem(
2260 b'ui',
2260 b'ui',
2261 b'interface.chunkselector',
2261 b'interface.chunkselector',
2262 default=None,
2262 default=None,
2263 )
2263 )
2264 coreconfigitem(
2264 coreconfigitem(
2265 b'ui',
2265 b'ui',
2266 b'large-file-limit',
2266 b'large-file-limit',
2267 default=10000000,
2267 default=10000000,
2268 )
2268 )
2269 coreconfigitem(
2269 coreconfigitem(
2270 b'ui',
2270 b'ui',
2271 b'logblockedtimes',
2271 b'logblockedtimes',
2272 default=False,
2272 default=False,
2273 )
2273 )
2274 coreconfigitem(
2274 coreconfigitem(
2275 b'ui',
2275 b'ui',
2276 b'merge',
2276 b'merge',
2277 default=None,
2277 default=None,
2278 )
2278 )
2279 coreconfigitem(
2279 coreconfigitem(
2280 b'ui',
2280 b'ui',
2281 b'mergemarkers',
2281 b'mergemarkers',
2282 default=b'basic',
2282 default=b'basic',
2283 )
2283 )
2284 coreconfigitem(
2284 coreconfigitem(
2285 b'ui',
2285 b'ui',
2286 b'message-output',
2286 b'message-output',
2287 default=b'stdio',
2287 default=b'stdio',
2288 )
2288 )
2289 coreconfigitem(
2289 coreconfigitem(
2290 b'ui',
2290 b'ui',
2291 b'nontty',
2291 b'nontty',
2292 default=False,
2292 default=False,
2293 )
2293 )
2294 coreconfigitem(
2294 coreconfigitem(
2295 b'ui',
2295 b'ui',
2296 b'origbackuppath',
2296 b'origbackuppath',
2297 default=None,
2297 default=None,
2298 )
2298 )
2299 coreconfigitem(
2299 coreconfigitem(
2300 b'ui',
2300 b'ui',
2301 b'paginate',
2301 b'paginate',
2302 default=True,
2302 default=True,
2303 )
2303 )
2304 coreconfigitem(
2304 coreconfigitem(
2305 b'ui',
2305 b'ui',
2306 b'patch',
2306 b'patch',
2307 default=None,
2307 default=None,
2308 )
2308 )
2309 coreconfigitem(
2309 coreconfigitem(
2310 b'ui',
2310 b'ui',
2311 b'portablefilenames',
2311 b'portablefilenames',
2312 default=b'warn',
2312 default=b'warn',
2313 )
2313 )
2314 coreconfigitem(
2314 coreconfigitem(
2315 b'ui',
2315 b'ui',
2316 b'promptecho',
2316 b'promptecho',
2317 default=False,
2317 default=False,
2318 )
2318 )
2319 coreconfigitem(
2319 coreconfigitem(
2320 b'ui',
2320 b'ui',
2321 b'quiet',
2321 b'quiet',
2322 default=False,
2322 default=False,
2323 )
2323 )
2324 coreconfigitem(
2324 coreconfigitem(
2325 b'ui',
2325 b'ui',
2326 b'quietbookmarkmove',
2326 b'quietbookmarkmove',
2327 default=False,
2327 default=False,
2328 )
2328 )
2329 coreconfigitem(
2329 coreconfigitem(
2330 b'ui',
2330 b'ui',
2331 b'relative-paths',
2331 b'relative-paths',
2332 default=b'legacy',
2332 default=b'legacy',
2333 )
2333 )
2334 coreconfigitem(
2334 coreconfigitem(
2335 b'ui',
2335 b'ui',
2336 b'remotecmd',
2336 b'remotecmd',
2337 default=b'hg',
2337 default=b'hg',
2338 )
2338 )
2339 coreconfigitem(
2339 coreconfigitem(
2340 b'ui',
2340 b'ui',
2341 b'report_untrusted',
2341 b'report_untrusted',
2342 default=True,
2342 default=True,
2343 )
2343 )
2344 coreconfigitem(
2344 coreconfigitem(
2345 b'ui',
2345 b'ui',
2346 b'rollback',
2346 b'rollback',
2347 default=True,
2347 default=True,
2348 )
2348 )
2349 coreconfigitem(
2349 coreconfigitem(
2350 b'ui',
2350 b'ui',
2351 b'signal-safe-lock',
2351 b'signal-safe-lock',
2352 default=True,
2352 default=True,
2353 )
2353 )
2354 coreconfigitem(
2354 coreconfigitem(
2355 b'ui',
2355 b'ui',
2356 b'slash',
2356 b'slash',
2357 default=False,
2357 default=False,
2358 )
2358 )
2359 coreconfigitem(
2359 coreconfigitem(
2360 b'ui',
2360 b'ui',
2361 b'ssh',
2361 b'ssh',
2362 default=b'ssh',
2362 default=b'ssh',
2363 )
2363 )
2364 coreconfigitem(
2364 coreconfigitem(
2365 b'ui',
2365 b'ui',
2366 b'ssherrorhint',
2366 b'ssherrorhint',
2367 default=None,
2367 default=None,
2368 )
2368 )
2369 coreconfigitem(
2369 coreconfigitem(
2370 b'ui',
2370 b'ui',
2371 b'statuscopies',
2371 b'statuscopies',
2372 default=False,
2372 default=False,
2373 )
2373 )
2374 coreconfigitem(
2374 coreconfigitem(
2375 b'ui',
2375 b'ui',
2376 b'strict',
2376 b'strict',
2377 default=False,
2377 default=False,
2378 )
2378 )
2379 coreconfigitem(
2379 coreconfigitem(
2380 b'ui',
2380 b'ui',
2381 b'style',
2381 b'style',
2382 default=b'',
2382 default=b'',
2383 )
2383 )
2384 coreconfigitem(
2384 coreconfigitem(
2385 b'ui',
2385 b'ui',
2386 b'supportcontact',
2386 b'supportcontact',
2387 default=None,
2387 default=None,
2388 )
2388 )
2389 coreconfigitem(
2389 coreconfigitem(
2390 b'ui',
2390 b'ui',
2391 b'textwidth',
2391 b'textwidth',
2392 default=78,
2392 default=78,
2393 )
2393 )
2394 coreconfigitem(
2394 coreconfigitem(
2395 b'ui',
2395 b'ui',
2396 b'timeout',
2396 b'timeout',
2397 default=b'600',
2397 default=b'600',
2398 )
2398 )
2399 coreconfigitem(
2399 coreconfigitem(
2400 b'ui',
2400 b'ui',
2401 b'timeout.warn',
2401 b'timeout.warn',
2402 default=0,
2402 default=0,
2403 )
2403 )
2404 coreconfigitem(
2404 coreconfigitem(
2405 b'ui',
2405 b'ui',
2406 b'timestamp-output',
2406 b'timestamp-output',
2407 default=False,
2407 default=False,
2408 )
2408 )
2409 coreconfigitem(
2409 coreconfigitem(
2410 b'ui',
2410 b'ui',
2411 b'traceback',
2411 b'traceback',
2412 default=False,
2412 default=False,
2413 )
2413 )
2414 coreconfigitem(
2414 coreconfigitem(
2415 b'ui',
2415 b'ui',
2416 b'tweakdefaults',
2416 b'tweakdefaults',
2417 default=False,
2417 default=False,
2418 )
2418 )
2419 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2419 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2420 coreconfigitem(
2420 coreconfigitem(
2421 b'ui',
2421 b'ui',
2422 b'verbose',
2422 b'verbose',
2423 default=False,
2423 default=False,
2424 )
2424 )
2425 coreconfigitem(
2425 coreconfigitem(
2426 b'verify',
2426 b'verify',
2427 b'skipflags',
2427 b'skipflags',
2428 default=None,
2428 default=None,
2429 )
2429 )
2430 coreconfigitem(
2430 coreconfigitem(
2431 b'web',
2431 b'web',
2432 b'allowbz2',
2432 b'allowbz2',
2433 default=False,
2433 default=False,
2434 )
2434 )
2435 coreconfigitem(
2435 coreconfigitem(
2436 b'web',
2436 b'web',
2437 b'allowgz',
2437 b'allowgz',
2438 default=False,
2438 default=False,
2439 )
2439 )
2440 coreconfigitem(
2440 coreconfigitem(
2441 b'web',
2441 b'web',
2442 b'allow-pull',
2442 b'allow-pull',
2443 alias=[(b'web', b'allowpull')],
2443 alias=[(b'web', b'allowpull')],
2444 default=True,
2444 default=True,
2445 )
2445 )
2446 coreconfigitem(
2446 coreconfigitem(
2447 b'web',
2447 b'web',
2448 b'allow-push',
2448 b'allow-push',
2449 alias=[(b'web', b'allow_push')],
2449 alias=[(b'web', b'allow_push')],
2450 default=list,
2450 default=list,
2451 )
2451 )
2452 coreconfigitem(
2452 coreconfigitem(
2453 b'web',
2453 b'web',
2454 b'allowzip',
2454 b'allowzip',
2455 default=False,
2455 default=False,
2456 )
2456 )
2457 coreconfigitem(
2457 coreconfigitem(
2458 b'web',
2458 b'web',
2459 b'archivesubrepos',
2459 b'archivesubrepos',
2460 default=False,
2460 default=False,
2461 )
2461 )
2462 coreconfigitem(
2462 coreconfigitem(
2463 b'web',
2463 b'web',
2464 b'cache',
2464 b'cache',
2465 default=True,
2465 default=True,
2466 )
2466 )
2467 coreconfigitem(
2467 coreconfigitem(
2468 b'web',
2468 b'web',
2469 b'comparisoncontext',
2469 b'comparisoncontext',
2470 default=5,
2470 default=5,
2471 )
2471 )
2472 coreconfigitem(
2472 coreconfigitem(
2473 b'web',
2473 b'web',
2474 b'contact',
2474 b'contact',
2475 default=None,
2475 default=None,
2476 )
2476 )
2477 coreconfigitem(
2477 coreconfigitem(
2478 b'web',
2478 b'web',
2479 b'deny_push',
2479 b'deny_push',
2480 default=list,
2480 default=list,
2481 )
2481 )
2482 coreconfigitem(
2482 coreconfigitem(
2483 b'web',
2483 b'web',
2484 b'guessmime',
2484 b'guessmime',
2485 default=False,
2485 default=False,
2486 )
2486 )
2487 coreconfigitem(
2487 coreconfigitem(
2488 b'web',
2488 b'web',
2489 b'hidden',
2489 b'hidden',
2490 default=False,
2490 default=False,
2491 )
2491 )
2492 coreconfigitem(
2492 coreconfigitem(
2493 b'web',
2493 b'web',
2494 b'labels',
2494 b'labels',
2495 default=list,
2495 default=list,
2496 )
2496 )
2497 coreconfigitem(
2497 coreconfigitem(
2498 b'web',
2498 b'web',
2499 b'logoimg',
2499 b'logoimg',
2500 default=b'hglogo.png',
2500 default=b'hglogo.png',
2501 )
2501 )
2502 coreconfigitem(
2502 coreconfigitem(
2503 b'web',
2503 b'web',
2504 b'logourl',
2504 b'logourl',
2505 default=b'https://mercurial-scm.org/',
2505 default=b'https://mercurial-scm.org/',
2506 )
2506 )
2507 coreconfigitem(
2507 coreconfigitem(
2508 b'web',
2508 b'web',
2509 b'accesslog',
2509 b'accesslog',
2510 default=b'-',
2510 default=b'-',
2511 )
2511 )
2512 coreconfigitem(
2512 coreconfigitem(
2513 b'web',
2513 b'web',
2514 b'address',
2514 b'address',
2515 default=b'',
2515 default=b'',
2516 )
2516 )
2517 coreconfigitem(
2517 coreconfigitem(
2518 b'web',
2518 b'web',
2519 b'allow-archive',
2519 b'allow-archive',
2520 alias=[(b'web', b'allow_archive')],
2520 alias=[(b'web', b'allow_archive')],
2521 default=list,
2521 default=list,
2522 )
2522 )
2523 coreconfigitem(
2523 coreconfigitem(
2524 b'web',
2524 b'web',
2525 b'allow_read',
2525 b'allow_read',
2526 default=list,
2526 default=list,
2527 )
2527 )
2528 coreconfigitem(
2528 coreconfigitem(
2529 b'web',
2529 b'web',
2530 b'baseurl',
2530 b'baseurl',
2531 default=None,
2531 default=None,
2532 )
2532 )
2533 coreconfigitem(
2533 coreconfigitem(
2534 b'web',
2534 b'web',
2535 b'cacerts',
2535 b'cacerts',
2536 default=None,
2536 default=None,
2537 )
2537 )
2538 coreconfigitem(
2538 coreconfigitem(
2539 b'web',
2539 b'web',
2540 b'certificate',
2540 b'certificate',
2541 default=None,
2541 default=None,
2542 )
2542 )
2543 coreconfigitem(
2543 coreconfigitem(
2544 b'web',
2544 b'web',
2545 b'collapse',
2545 b'collapse',
2546 default=False,
2546 default=False,
2547 )
2547 )
2548 coreconfigitem(
2548 coreconfigitem(
2549 b'web',
2549 b'web',
2550 b'csp',
2550 b'csp',
2551 default=None,
2551 default=None,
2552 )
2552 )
2553 coreconfigitem(
2553 coreconfigitem(
2554 b'web',
2554 b'web',
2555 b'deny_read',
2555 b'deny_read',
2556 default=list,
2556 default=list,
2557 )
2557 )
2558 coreconfigitem(
2558 coreconfigitem(
2559 b'web',
2559 b'web',
2560 b'descend',
2560 b'descend',
2561 default=True,
2561 default=True,
2562 )
2562 )
2563 coreconfigitem(
2563 coreconfigitem(
2564 b'web',
2564 b'web',
2565 b'description',
2565 b'description',
2566 default=b"",
2566 default=b"",
2567 )
2567 )
2568 coreconfigitem(
2568 coreconfigitem(
2569 b'web',
2569 b'web',
2570 b'encoding',
2570 b'encoding',
2571 default=lambda: encoding.encoding,
2571 default=lambda: encoding.encoding,
2572 )
2572 )
2573 coreconfigitem(
2573 coreconfigitem(
2574 b'web',
2574 b'web',
2575 b'errorlog',
2575 b'errorlog',
2576 default=b'-',
2576 default=b'-',
2577 )
2577 )
2578 coreconfigitem(
2578 coreconfigitem(
2579 b'web',
2579 b'web',
2580 b'ipv6',
2580 b'ipv6',
2581 default=False,
2581 default=False,
2582 )
2582 )
2583 coreconfigitem(
2583 coreconfigitem(
2584 b'web',
2584 b'web',
2585 b'maxchanges',
2585 b'maxchanges',
2586 default=10,
2586 default=10,
2587 )
2587 )
2588 coreconfigitem(
2588 coreconfigitem(
2589 b'web',
2589 b'web',
2590 b'maxfiles',
2590 b'maxfiles',
2591 default=10,
2591 default=10,
2592 )
2592 )
2593 coreconfigitem(
2593 coreconfigitem(
2594 b'web',
2594 b'web',
2595 b'maxshortchanges',
2595 b'maxshortchanges',
2596 default=60,
2596 default=60,
2597 )
2597 )
2598 coreconfigitem(
2598 coreconfigitem(
2599 b'web',
2599 b'web',
2600 b'motd',
2600 b'motd',
2601 default=b'',
2601 default=b'',
2602 )
2602 )
2603 coreconfigitem(
2603 coreconfigitem(
2604 b'web',
2604 b'web',
2605 b'name',
2605 b'name',
2606 default=dynamicdefault,
2606 default=dynamicdefault,
2607 )
2607 )
2608 coreconfigitem(
2608 coreconfigitem(
2609 b'web',
2609 b'web',
2610 b'port',
2610 b'port',
2611 default=8000,
2611 default=8000,
2612 )
2612 )
2613 coreconfigitem(
2613 coreconfigitem(
2614 b'web',
2614 b'web',
2615 b'prefix',
2615 b'prefix',
2616 default=b'',
2616 default=b'',
2617 )
2617 )
2618 coreconfigitem(
2618 coreconfigitem(
2619 b'web',
2619 b'web',
2620 b'push_ssl',
2620 b'push_ssl',
2621 default=True,
2621 default=True,
2622 )
2622 )
2623 coreconfigitem(
2623 coreconfigitem(
2624 b'web',
2624 b'web',
2625 b'refreshinterval',
2625 b'refreshinterval',
2626 default=20,
2626 default=20,
2627 )
2627 )
2628 coreconfigitem(
2628 coreconfigitem(
2629 b'web',
2629 b'web',
2630 b'server-header',
2630 b'server-header',
2631 default=None,
2631 default=None,
2632 )
2632 )
2633 coreconfigitem(
2633 coreconfigitem(
2634 b'web',
2634 b'web',
2635 b'static',
2635 b'static',
2636 default=None,
2636 default=None,
2637 )
2637 )
2638 coreconfigitem(
2638 coreconfigitem(
2639 b'web',
2639 b'web',
2640 b'staticurl',
2640 b'staticurl',
2641 default=None,
2641 default=None,
2642 )
2642 )
2643 coreconfigitem(
2643 coreconfigitem(
2644 b'web',
2644 b'web',
2645 b'stripes',
2645 b'stripes',
2646 default=1,
2646 default=1,
2647 )
2647 )
2648 coreconfigitem(
2648 coreconfigitem(
2649 b'web',
2649 b'web',
2650 b'style',
2650 b'style',
2651 default=b'paper',
2651 default=b'paper',
2652 )
2652 )
2653 coreconfigitem(
2653 coreconfigitem(
2654 b'web',
2654 b'web',
2655 b'templates',
2655 b'templates',
2656 default=None,
2656 default=None,
2657 )
2657 )
2658 coreconfigitem(
2658 coreconfigitem(
2659 b'web',
2659 b'web',
2660 b'view',
2660 b'view',
2661 default=b'served',
2661 default=b'served',
2662 experimental=True,
2662 experimental=True,
2663 )
2663 )
2664 coreconfigitem(
2664 coreconfigitem(
2665 b'worker',
2665 b'worker',
2666 b'backgroundclose',
2666 b'backgroundclose',
2667 default=dynamicdefault,
2667 default=dynamicdefault,
2668 )
2668 )
2669 # Windows defaults to a limit of 512 open files. A buffer of 128
2669 # Windows defaults to a limit of 512 open files. A buffer of 128
2670 # should give us enough headway.
2670 # should give us enough headway.
2671 coreconfigitem(
2671 coreconfigitem(
2672 b'worker',
2672 b'worker',
2673 b'backgroundclosemaxqueue',
2673 b'backgroundclosemaxqueue',
2674 default=384,
2674 default=384,
2675 )
2675 )
2676 coreconfigitem(
2676 coreconfigitem(
2677 b'worker',
2677 b'worker',
2678 b'backgroundcloseminfilecount',
2678 b'backgroundcloseminfilecount',
2679 default=2048,
2679 default=2048,
2680 )
2680 )
2681 coreconfigitem(
2681 coreconfigitem(
2682 b'worker',
2682 b'worker',
2683 b'backgroundclosethreadcount',
2683 b'backgroundclosethreadcount',
2684 default=4,
2684 default=4,
2685 )
2685 )
2686 coreconfigitem(
2686 coreconfigitem(
2687 b'worker',
2687 b'worker',
2688 b'enabled',
2688 b'enabled',
2689 default=True,
2689 default=True,
2690 )
2690 )
2691 coreconfigitem(
2691 coreconfigitem(
2692 b'worker',
2692 b'worker',
2693 b'numcpus',
2693 b'numcpus',
2694 default=None,
2694 default=None,
2695 )
2695 )
2696
2696
2697 # Rebase related configuration moved to core because other extension are doing
2697 # Rebase related configuration moved to core because other extension are doing
2698 # strange things. For example, shelve import the extensions to reuse some bit
2698 # strange things. For example, shelve import the extensions to reuse some bit
2699 # without formally loading it.
2699 # without formally loading it.
2700 coreconfigitem(
2700 coreconfigitem(
2701 b'commands',
2701 b'commands',
2702 b'rebase.requiredest',
2702 b'rebase.requiredest',
2703 default=False,
2703 default=False,
2704 )
2704 )
2705 coreconfigitem(
2705 coreconfigitem(
2706 b'experimental',
2706 b'experimental',
2707 b'rebaseskipobsolete',
2707 b'rebaseskipobsolete',
2708 default=True,
2708 default=True,
2709 )
2709 )
2710 coreconfigitem(
2710 coreconfigitem(
2711 b'rebase',
2711 b'rebase',
2712 b'singletransaction',
2712 b'singletransaction',
2713 default=False,
2713 default=False,
2714 )
2714 )
2715 coreconfigitem(
2715 coreconfigitem(
2716 b'rebase',
2716 b'rebase',
2717 b'experimental.inmemory',
2717 b'experimental.inmemory',
2718 default=False,
2718 default=False,
2719 )
2719 )
@@ -1,3475 +1,3535 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 # coding: utf8
2 #
3 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
5 #
5 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
7
8
8 """Storage back-end for Mercurial.
9 """Storage back-end for Mercurial.
9
10
10 This provides efficient delta storage with O(1) retrieve and append
11 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
12 and O(changes) merge between branches.
12 """
13 """
13
14
14 from __future__ import absolute_import
15 from __future__ import absolute_import
15
16
16 import binascii
17 import binascii
17 import collections
18 import collections
18 import contextlib
19 import contextlib
19 import errno
20 import errno
20 import io
21 import io
21 import os
22 import os
22 import struct
23 import struct
23 import zlib
24 import zlib
24
25
25 # import stuff from node for others to import from revlog
26 # import stuff from node for others to import from revlog
26 from .node import (
27 from .node import (
27 bin,
28 bin,
28 hex,
29 hex,
29 nullrev,
30 nullrev,
30 sha1nodeconstants,
31 sha1nodeconstants,
31 short,
32 short,
32 wdirrev,
33 wdirrev,
33 )
34 )
34 from .i18n import _
35 from .i18n import _
35 from .pycompat import getattr
36 from .pycompat import getattr
36 from .revlogutils.constants import (
37 from .revlogutils.constants import (
37 ALL_KINDS,
38 ALL_KINDS,
38 CHANGELOGV2,
39 CHANGELOGV2,
39 COMP_MODE_DEFAULT,
40 COMP_MODE_DEFAULT,
40 COMP_MODE_INLINE,
41 COMP_MODE_INLINE,
41 COMP_MODE_PLAIN,
42 COMP_MODE_PLAIN,
42 FEATURES_BY_VERSION,
43 FEATURES_BY_VERSION,
43 FLAG_GENERALDELTA,
44 FLAG_GENERALDELTA,
44 FLAG_INLINE_DATA,
45 FLAG_INLINE_DATA,
45 INDEX_HEADER,
46 INDEX_HEADER,
46 KIND_CHANGELOG,
47 KIND_CHANGELOG,
47 REVLOGV0,
48 REVLOGV0,
48 REVLOGV1,
49 REVLOGV1,
49 REVLOGV1_FLAGS,
50 REVLOGV1_FLAGS,
50 REVLOGV2,
51 REVLOGV2,
51 REVLOGV2_FLAGS,
52 REVLOGV2_FLAGS,
52 REVLOG_DEFAULT_FLAGS,
53 REVLOG_DEFAULT_FLAGS,
53 REVLOG_DEFAULT_FORMAT,
54 REVLOG_DEFAULT_FORMAT,
54 REVLOG_DEFAULT_VERSION,
55 REVLOG_DEFAULT_VERSION,
55 SUPPORTED_FLAGS,
56 SUPPORTED_FLAGS,
56 )
57 )
57 from .revlogutils.flagutil import (
58 from .revlogutils.flagutil import (
58 REVIDX_DEFAULT_FLAGS,
59 REVIDX_DEFAULT_FLAGS,
59 REVIDX_ELLIPSIS,
60 REVIDX_ELLIPSIS,
60 REVIDX_EXTSTORED,
61 REVIDX_EXTSTORED,
61 REVIDX_FLAGS_ORDER,
62 REVIDX_FLAGS_ORDER,
62 REVIDX_HASCOPIESINFO,
63 REVIDX_HASCOPIESINFO,
63 REVIDX_ISCENSORED,
64 REVIDX_ISCENSORED,
64 REVIDX_RAWTEXT_CHANGING_FLAGS,
65 REVIDX_RAWTEXT_CHANGING_FLAGS,
65 )
66 )
66 from .thirdparty import attr
67 from .thirdparty import attr
67 from . import (
68 from . import (
68 ancestor,
69 ancestor,
69 dagop,
70 dagop,
70 error,
71 error,
71 mdiff,
72 mdiff,
72 policy,
73 policy,
73 pycompat,
74 pycompat,
74 templatefilters,
75 templatefilters,
75 util,
76 util,
76 )
77 )
77 from .interfaces import (
78 from .interfaces import (
78 repository,
79 repository,
79 util as interfaceutil,
80 util as interfaceutil,
80 )
81 )
81 from .revlogutils import (
82 from .revlogutils import (
82 deltas as deltautil,
83 deltas as deltautil,
83 docket as docketutil,
84 docket as docketutil,
84 flagutil,
85 flagutil,
85 nodemap as nodemaputil,
86 nodemap as nodemaputil,
86 revlogv0,
87 revlogv0,
87 sidedata as sidedatautil,
88 sidedata as sidedatautil,
88 )
89 )
89 from .utils import (
90 from .utils import (
90 storageutil,
91 storageutil,
91 stringutil,
92 stringutil,
92 )
93 )
93
94
94 # blanked usage of all the name to prevent pyflakes constraints
95 # blanked usage of all the name to prevent pyflakes constraints
95 # We need these name available in the module for extensions.
96 # We need these name available in the module for extensions.
96
97
97 REVLOGV0
98 REVLOGV0
98 REVLOGV1
99 REVLOGV1
99 REVLOGV2
100 REVLOGV2
100 FLAG_INLINE_DATA
101 FLAG_INLINE_DATA
101 FLAG_GENERALDELTA
102 FLAG_GENERALDELTA
102 REVLOG_DEFAULT_FLAGS
103 REVLOG_DEFAULT_FLAGS
103 REVLOG_DEFAULT_FORMAT
104 REVLOG_DEFAULT_FORMAT
104 REVLOG_DEFAULT_VERSION
105 REVLOG_DEFAULT_VERSION
105 REVLOGV1_FLAGS
106 REVLOGV1_FLAGS
106 REVLOGV2_FLAGS
107 REVLOGV2_FLAGS
107 REVIDX_ISCENSORED
108 REVIDX_ISCENSORED
108 REVIDX_ELLIPSIS
109 REVIDX_ELLIPSIS
109 REVIDX_HASCOPIESINFO
110 REVIDX_HASCOPIESINFO
110 REVIDX_EXTSTORED
111 REVIDX_EXTSTORED
111 REVIDX_DEFAULT_FLAGS
112 REVIDX_DEFAULT_FLAGS
112 REVIDX_FLAGS_ORDER
113 REVIDX_FLAGS_ORDER
113 REVIDX_RAWTEXT_CHANGING_FLAGS
114 REVIDX_RAWTEXT_CHANGING_FLAGS
114
115
115 parsers = policy.importmod('parsers')
116 parsers = policy.importmod('parsers')
116 rustancestor = policy.importrust('ancestor')
117 rustancestor = policy.importrust('ancestor')
117 rustdagop = policy.importrust('dagop')
118 rustdagop = policy.importrust('dagop')
118 rustrevlog = policy.importrust('revlog')
119 rustrevlog = policy.importrust('revlog')
119
120
120 # Aliased for performance.
121 # Aliased for performance.
121 _zlibdecompress = zlib.decompress
122 _zlibdecompress = zlib.decompress
122
123
123 # max size of revlog with inline data
124 # max size of revlog with inline data
124 _maxinline = 131072
125 _maxinline = 131072
125 _chunksize = 1048576
126 _chunksize = 1048576
126
127
127 # Flag processors for REVIDX_ELLIPSIS.
128 # Flag processors for REVIDX_ELLIPSIS.
128 def ellipsisreadprocessor(rl, text):
129 def ellipsisreadprocessor(rl, text):
129 return text, False
130 return text, False
130
131
131
132
132 def ellipsiswriteprocessor(rl, text):
133 def ellipsiswriteprocessor(rl, text):
133 return text, False
134 return text, False
134
135
135
136
136 def ellipsisrawprocessor(rl, text):
137 def ellipsisrawprocessor(rl, text):
137 return False
138 return False
138
139
139
140
140 ellipsisprocessor = (
141 ellipsisprocessor = (
141 ellipsisreadprocessor,
142 ellipsisreadprocessor,
142 ellipsiswriteprocessor,
143 ellipsiswriteprocessor,
143 ellipsisrawprocessor,
144 ellipsisrawprocessor,
144 )
145 )
145
146
146
147
147 def offset_type(offset, type):
148 def offset_type(offset, type):
148 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
149 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
149 raise ValueError(b'unknown revlog index flags')
150 raise ValueError(b'unknown revlog index flags')
150 return int(int(offset) << 16 | type)
151 return int(int(offset) << 16 | type)
151
152
152
153
153 def _verify_revision(rl, skipflags, state, node):
154 def _verify_revision(rl, skipflags, state, node):
154 """Verify the integrity of the given revlog ``node`` while providing a hook
155 """Verify the integrity of the given revlog ``node`` while providing a hook
155 point for extensions to influence the operation."""
156 point for extensions to influence the operation."""
156 if skipflags:
157 if skipflags:
157 state[b'skipread'].add(node)
158 state[b'skipread'].add(node)
158 else:
159 else:
159 # Side-effect: read content and verify hash.
160 # Side-effect: read content and verify hash.
160 rl.revision(node)
161 rl.revision(node)
161
162
162
163
163 # True if a fast implementation for persistent-nodemap is available
164 # True if a fast implementation for persistent-nodemap is available
164 #
165 #
165 # We also consider we have a "fast" implementation in "pure" python because
166 # We also consider we have a "fast" implementation in "pure" python because
166 # people using pure don't really have performance consideration (and a
167 # people using pure don't really have performance consideration (and a
167 # wheelbarrow of other slowness source)
168 # wheelbarrow of other slowness source)
168 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
169 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
169 parsers, 'BaseIndexObject'
170 parsers, 'BaseIndexObject'
170 )
171 )
171
172
172
173
173 @attr.s(slots=True, frozen=True)
174 @attr.s(slots=True, frozen=True)
174 class _revisioninfo(object):
175 class _revisioninfo(object):
175 """Information about a revision that allows building its fulltext
176 """Information about a revision that allows building its fulltext
176 node: expected hash of the revision
177 node: expected hash of the revision
177 p1, p2: parent revs of the revision
178 p1, p2: parent revs of the revision
178 btext: built text cache consisting of a one-element list
179 btext: built text cache consisting of a one-element list
179 cachedelta: (baserev, uncompressed_delta) or None
180 cachedelta: (baserev, uncompressed_delta) or None
180 flags: flags associated to the revision storage
181 flags: flags associated to the revision storage
181
182
182 One of btext[0] or cachedelta must be set.
183 One of btext[0] or cachedelta must be set.
183 """
184 """
184
185
185 node = attr.ib()
186 node = attr.ib()
186 p1 = attr.ib()
187 p1 = attr.ib()
187 p2 = attr.ib()
188 p2 = attr.ib()
188 btext = attr.ib()
189 btext = attr.ib()
189 textlen = attr.ib()
190 textlen = attr.ib()
190 cachedelta = attr.ib()
191 cachedelta = attr.ib()
191 flags = attr.ib()
192 flags = attr.ib()
192
193
193
194
194 @interfaceutil.implementer(repository.irevisiondelta)
195 @interfaceutil.implementer(repository.irevisiondelta)
195 @attr.s(slots=True)
196 @attr.s(slots=True)
196 class revlogrevisiondelta(object):
197 class revlogrevisiondelta(object):
197 node = attr.ib()
198 node = attr.ib()
198 p1node = attr.ib()
199 p1node = attr.ib()
199 p2node = attr.ib()
200 p2node = attr.ib()
200 basenode = attr.ib()
201 basenode = attr.ib()
201 flags = attr.ib()
202 flags = attr.ib()
202 baserevisionsize = attr.ib()
203 baserevisionsize = attr.ib()
203 revision = attr.ib()
204 revision = attr.ib()
204 delta = attr.ib()
205 delta = attr.ib()
205 sidedata = attr.ib()
206 sidedata = attr.ib()
206 protocol_flags = attr.ib()
207 protocol_flags = attr.ib()
207 linknode = attr.ib(default=None)
208 linknode = attr.ib(default=None)
208
209
209
210
210 @interfaceutil.implementer(repository.iverifyproblem)
211 @interfaceutil.implementer(repository.iverifyproblem)
211 @attr.s(frozen=True)
212 @attr.s(frozen=True)
212 class revlogproblem(object):
213 class revlogproblem(object):
213 warning = attr.ib(default=None)
214 warning = attr.ib(default=None)
214 error = attr.ib(default=None)
215 error = attr.ib(default=None)
215 node = attr.ib(default=None)
216 node = attr.ib(default=None)
216
217
217
218
218 def parse_index_v1(data, inline):
219 def parse_index_v1(data, inline):
219 # call the C implementation to parse the index data
220 # call the C implementation to parse the index data
220 index, cache = parsers.parse_index2(data, inline)
221 index, cache = parsers.parse_index2(data, inline)
221 return index, cache
222 return index, cache
222
223
223
224
224 def parse_index_v2(data, inline):
225 def parse_index_v2(data, inline):
225 # call the C implementation to parse the index data
226 # call the C implementation to parse the index data
226 index, cache = parsers.parse_index2(data, inline, revlogv2=True)
227 index, cache = parsers.parse_index2(data, inline, revlogv2=True)
227 return index, cache
228 return index, cache
228
229
229
230
230 def parse_index_cl_v2(data, inline):
231 def parse_index_cl_v2(data, inline):
231 # call the C implementation to parse the index data
232 # call the C implementation to parse the index data
232 assert not inline
233 assert not inline
233 from .pure.parsers import parse_index_cl_v2
234 from .pure.parsers import parse_index_cl_v2
234
235
235 index, cache = parse_index_cl_v2(data)
236 index, cache = parse_index_cl_v2(data)
236 return index, cache
237 return index, cache
237
238
238
239
239 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
240 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
240
241
241 def parse_index_v1_nodemap(data, inline):
242 def parse_index_v1_nodemap(data, inline):
242 index, cache = parsers.parse_index_devel_nodemap(data, inline)
243 index, cache = parsers.parse_index_devel_nodemap(data, inline)
243 return index, cache
244 return index, cache
244
245
245
246
246 else:
247 else:
247 parse_index_v1_nodemap = None
248 parse_index_v1_nodemap = None
248
249
249
250
250 def parse_index_v1_mixed(data, inline):
251 def parse_index_v1_mixed(data, inline):
251 index, cache = parse_index_v1(data, inline)
252 index, cache = parse_index_v1(data, inline)
252 return rustrevlog.MixedIndex(index), cache
253 return rustrevlog.MixedIndex(index), cache
253
254
254
255
255 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
256 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
256 # signed integer)
257 # signed integer)
257 _maxentrysize = 0x7FFFFFFF
258 _maxentrysize = 0x7FFFFFFF
258
259
259 PARTIAL_READ_MSG = _(
260 PARTIAL_READ_MSG = _(
260 b'partial read of revlog %s; expected %d bytes from offset %d, got %d'
261 b'partial read of revlog %s; expected %d bytes from offset %d, got %d'
261 )
262 )
262
263
264 FILE_TOO_SHORT_MSG = _(
265 b'cannot read from revlog %s;'
266 b' expected %d bytes from offset %d, data size is %d'
267 )
268
263
269
264 class revlog(object):
270 class revlog(object):
265 """
271 """
266 the underlying revision storage object
272 the underlying revision storage object
267
273
268 A revlog consists of two parts, an index and the revision data.
274 A revlog consists of two parts, an index and the revision data.
269
275
270 The index is a file with a fixed record size containing
276 The index is a file with a fixed record size containing
271 information on each revision, including its nodeid (hash), the
277 information on each revision, including its nodeid (hash), the
272 nodeids of its parents, the position and offset of its data within
278 nodeids of its parents, the position and offset of its data within
273 the data file, and the revision it's based on. Finally, each entry
279 the data file, and the revision it's based on. Finally, each entry
274 contains a linkrev entry that can serve as a pointer to external
280 contains a linkrev entry that can serve as a pointer to external
275 data.
281 data.
276
282
277 The revision data itself is a linear collection of data chunks.
283 The revision data itself is a linear collection of data chunks.
278 Each chunk represents a revision and is usually represented as a
284 Each chunk represents a revision and is usually represented as a
279 delta against the previous chunk. To bound lookup time, runs of
285 delta against the previous chunk. To bound lookup time, runs of
280 deltas are limited to about 2 times the length of the original
286 deltas are limited to about 2 times the length of the original
281 version data. This makes retrieval of a version proportional to
287 version data. This makes retrieval of a version proportional to
282 its size, or O(1) relative to the number of revisions.
288 its size, or O(1) relative to the number of revisions.
283
289
284 Both pieces of the revlog are written to in an append-only
290 Both pieces of the revlog are written to in an append-only
285 fashion, which means we never need to rewrite a file to insert or
291 fashion, which means we never need to rewrite a file to insert or
286 remove data, and can use some simple techniques to avoid the need
292 remove data, and can use some simple techniques to avoid the need
287 for locking while reading.
293 for locking while reading.
288
294
289 If checkambig, indexfile is opened with checkambig=True at
295 If checkambig, indexfile is opened with checkambig=True at
290 writing, to avoid file stat ambiguity.
296 writing, to avoid file stat ambiguity.
291
297
292 If mmaplargeindex is True, and an mmapindexthreshold is set, the
298 If mmaplargeindex is True, and an mmapindexthreshold is set, the
293 index will be mmapped rather than read if it is larger than the
299 index will be mmapped rather than read if it is larger than the
294 configured threshold.
300 configured threshold.
295
301
296 If censorable is True, the revlog can have censored revisions.
302 If censorable is True, the revlog can have censored revisions.
297
303
298 If `upperboundcomp` is not None, this is the expected maximal gain from
304 If `upperboundcomp` is not None, this is the expected maximal gain from
299 compression for the data content.
305 compression for the data content.
300
306
301 `concurrencychecker` is an optional function that receives 3 arguments: a
307 `concurrencychecker` is an optional function that receives 3 arguments: a
302 file handle, a filename, and an expected position. It should check whether
308 file handle, a filename, and an expected position. It should check whether
303 the current position in the file handle is valid, and log/warn/fail (by
309 the current position in the file handle is valid, and log/warn/fail (by
304 raising).
310 raising).
305
311
306
312
307 Internal details
313 Internal details
308 ----------------
314 ----------------
309
315
310 A large part of the revlog logic deals with revisions' "index entries", tuple
316 A large part of the revlog logic deals with revisions' "index entries", tuple
311 objects that contains the same "items" whatever the revlog version.
317 objects that contains the same "items" whatever the revlog version.
312 Different versions will have different ways of storing these items (sometimes
318 Different versions will have different ways of storing these items (sometimes
313 not having them at all), but the tuple will always be the same. New fields
319 not having them at all), but the tuple will always be the same. New fields
314 are usually added at the end to avoid breaking existing code that relies
320 are usually added at the end to avoid breaking existing code that relies
315 on the existing order. The field are defined as follows:
321 on the existing order. The field are defined as follows:
316
322
317 [0] offset:
323 [0] offset:
318 The byte index of the start of revision data chunk.
324 The byte index of the start of revision data chunk.
319 That value is shifted up by 16 bits. use "offset = field >> 16" to
325 That value is shifted up by 16 bits. use "offset = field >> 16" to
320 retrieve it.
326 retrieve it.
321
327
322 flags:
328 flags:
323 A flag field that carries special information or changes the behavior
329 A flag field that carries special information or changes the behavior
324 of the revision. (see `REVIDX_*` constants for details)
330 of the revision. (see `REVIDX_*` constants for details)
325 The flag field only occupies the first 16 bits of this field,
331 The flag field only occupies the first 16 bits of this field,
326 use "flags = field & 0xFFFF" to retrieve the value.
332 use "flags = field & 0xFFFF" to retrieve the value.
327
333
328 [1] compressed length:
334 [1] compressed length:
329 The size, in bytes, of the chunk on disk
335 The size, in bytes, of the chunk on disk
330
336
331 [2] uncompressed length:
337 [2] uncompressed length:
332 The size, in bytes, of the full revision once reconstructed.
338 The size, in bytes, of the full revision once reconstructed.
333
339
334 [3] base rev:
340 [3] base rev:
335 Either the base of the revision delta chain (without general
341 Either the base of the revision delta chain (without general
336 delta), or the base of the delta (stored in the data chunk)
342 delta), or the base of the delta (stored in the data chunk)
337 with general delta.
343 with general delta.
338
344
339 [4] link rev:
345 [4] link rev:
340 Changelog revision number of the changeset introducing this
346 Changelog revision number of the changeset introducing this
341 revision.
347 revision.
342
348
343 [5] parent 1 rev:
349 [5] parent 1 rev:
344 Revision number of the first parent
350 Revision number of the first parent
345
351
346 [6] parent 2 rev:
352 [6] parent 2 rev:
347 Revision number of the second parent
353 Revision number of the second parent
348
354
349 [7] node id:
355 [7] node id:
350 The node id of the current revision
356 The node id of the current revision
351
357
352 [8] sidedata offset:
358 [8] sidedata offset:
353 The byte index of the start of the revision's side-data chunk.
359 The byte index of the start of the revision's side-data chunk.
354
360
355 [9] sidedata chunk length:
361 [9] sidedata chunk length:
356 The size, in bytes, of the revision's side-data chunk.
362 The size, in bytes, of the revision's side-data chunk.
357
363
358 [10] data compression mode:
364 [10] data compression mode:
359 two bits that detail the way the data chunk is compressed on disk.
365 two bits that detail the way the data chunk is compressed on disk.
360 (see "COMP_MODE_*" constants for details). For revlog version 0 and
366 (see "COMP_MODE_*" constants for details). For revlog version 0 and
361 1 this will always be COMP_MODE_INLINE.
367 1 this will always be COMP_MODE_INLINE.
362
368
363 [11] side-data compression mode:
369 [11] side-data compression mode:
364 two bits that detail the way the sidedata chunk is compressed on disk.
370 two bits that detail the way the sidedata chunk is compressed on disk.
365 (see "COMP_MODE_*" constants for details)
371 (see "COMP_MODE_*" constants for details)
366 """
372 """
367
373
368 _flagserrorclass = error.RevlogError
374 _flagserrorclass = error.RevlogError
369
375
370 def __init__(
376 def __init__(
371 self,
377 self,
372 opener,
378 opener,
373 target,
379 target,
374 radix,
380 radix,
375 postfix=None, # only exist for `tmpcensored` now
381 postfix=None, # only exist for `tmpcensored` now
376 checkambig=False,
382 checkambig=False,
377 mmaplargeindex=False,
383 mmaplargeindex=False,
378 censorable=False,
384 censorable=False,
379 upperboundcomp=None,
385 upperboundcomp=None,
380 persistentnodemap=False,
386 persistentnodemap=False,
381 concurrencychecker=None,
387 concurrencychecker=None,
382 trypending=False,
388 trypending=False,
383 ):
389 ):
384 """
390 """
385 create a revlog object
391 create a revlog object
386
392
387 opener is a function that abstracts the file opening operation
393 opener is a function that abstracts the file opening operation
388 and can be used to implement COW semantics or the like.
394 and can be used to implement COW semantics or the like.
389
395
390 `target`: a (KIND, ID) tuple that identify the content stored in
396 `target`: a (KIND, ID) tuple that identify the content stored in
391 this revlog. It help the rest of the code to understand what the revlog
397 this revlog. It help the rest of the code to understand what the revlog
392 is about without having to resort to heuristic and index filename
398 is about without having to resort to heuristic and index filename
393 analysis. Note: that this must be reliably be set by normal code, but
399 analysis. Note: that this must be reliably be set by normal code, but
394 that test, debug, or performance measurement code might not set this to
400 that test, debug, or performance measurement code might not set this to
395 accurate value.
401 accurate value.
396 """
402 """
397 self.upperboundcomp = upperboundcomp
403 self.upperboundcomp = upperboundcomp
398
404
399 self.radix = radix
405 self.radix = radix
400
406
401 self._docket_file = None
407 self._docket_file = None
402 self._indexfile = None
408 self._indexfile = None
403 self._datafile = None
409 self._datafile = None
410 self._sidedatafile = None
404 self._nodemap_file = None
411 self._nodemap_file = None
405 self.postfix = postfix
412 self.postfix = postfix
406 self._trypending = trypending
413 self._trypending = trypending
407 self.opener = opener
414 self.opener = opener
408 if persistentnodemap:
415 if persistentnodemap:
409 self._nodemap_file = nodemaputil.get_nodemap_file(self)
416 self._nodemap_file = nodemaputil.get_nodemap_file(self)
410
417
411 assert target[0] in ALL_KINDS
418 assert target[0] in ALL_KINDS
412 assert len(target) == 2
419 assert len(target) == 2
413 self.target = target
420 self.target = target
414 # When True, indexfile is opened with checkambig=True at writing, to
421 # When True, indexfile is opened with checkambig=True at writing, to
415 # avoid file stat ambiguity.
422 # avoid file stat ambiguity.
416 self._checkambig = checkambig
423 self._checkambig = checkambig
417 self._mmaplargeindex = mmaplargeindex
424 self._mmaplargeindex = mmaplargeindex
418 self._censorable = censorable
425 self._censorable = censorable
419 # 3-tuple of (node, rev, text) for a raw revision.
426 # 3-tuple of (node, rev, text) for a raw revision.
420 self._revisioncache = None
427 self._revisioncache = None
421 # Maps rev to chain base rev.
428 # Maps rev to chain base rev.
422 self._chainbasecache = util.lrucachedict(100)
429 self._chainbasecache = util.lrucachedict(100)
423 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
430 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
424 self._chunkcache = (0, b'')
431 self._chunkcache = (0, b'')
425 # How much data to read and cache into the raw revlog data cache.
432 # How much data to read and cache into the raw revlog data cache.
426 self._chunkcachesize = 65536
433 self._chunkcachesize = 65536
427 self._maxchainlen = None
434 self._maxchainlen = None
428 self._deltabothparents = True
435 self._deltabothparents = True
429 self.index = None
436 self.index = None
430 self._docket = None
437 self._docket = None
431 self._nodemap_docket = None
438 self._nodemap_docket = None
432 # Mapping of partial identifiers to full nodes.
439 # Mapping of partial identifiers to full nodes.
433 self._pcache = {}
440 self._pcache = {}
434 # Mapping of revision integer to full node.
441 # Mapping of revision integer to full node.
435 self._compengine = b'zlib'
442 self._compengine = b'zlib'
436 self._compengineopts = {}
443 self._compengineopts = {}
437 self._maxdeltachainspan = -1
444 self._maxdeltachainspan = -1
438 self._withsparseread = False
445 self._withsparseread = False
439 self._sparserevlog = False
446 self._sparserevlog = False
440 self.hassidedata = False
447 self.hassidedata = False
441 self._srdensitythreshold = 0.50
448 self._srdensitythreshold = 0.50
442 self._srmingapsize = 262144
449 self._srmingapsize = 262144
443
450
444 # Make copy of flag processors so each revlog instance can support
451 # Make copy of flag processors so each revlog instance can support
445 # custom flags.
452 # custom flags.
446 self._flagprocessors = dict(flagutil.flagprocessors)
453 self._flagprocessors = dict(flagutil.flagprocessors)
447
454
448 # 2-tuple of file handles being used for active writing.
455 # 3-tuple of file handles being used for active writing.
449 self._writinghandles = None
456 self._writinghandles = None
450 # prevent nesting of addgroup
457 # prevent nesting of addgroup
451 self._adding_group = None
458 self._adding_group = None
452
459
453 self._loadindex()
460 self._loadindex()
454
461
455 self._concurrencychecker = concurrencychecker
462 self._concurrencychecker = concurrencychecker
456
463
457 def _init_opts(self):
464 def _init_opts(self):
458 """process options (from above/config) to setup associated default revlog mode
465 """process options (from above/config) to setup associated default revlog mode
459
466
460 These values might be affected when actually reading on disk information.
467 These values might be affected when actually reading on disk information.
461
468
462 The relevant values are returned for use in _loadindex().
469 The relevant values are returned for use in _loadindex().
463
470
464 * newversionflags:
471 * newversionflags:
465 version header to use if we need to create a new revlog
472 version header to use if we need to create a new revlog
466
473
467 * mmapindexthreshold:
474 * mmapindexthreshold:
468 minimal index size for start to use mmap
475 minimal index size for start to use mmap
469
476
470 * force_nodemap:
477 * force_nodemap:
471 force the usage of a "development" version of the nodemap code
478 force the usage of a "development" version of the nodemap code
472 """
479 """
473 mmapindexthreshold = None
480 mmapindexthreshold = None
474 opts = self.opener.options
481 opts = self.opener.options
475
482
476 if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
483 if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
477 new_header = CHANGELOGV2
484 new_header = CHANGELOGV2
478 elif b'revlogv2' in opts:
485 elif b'revlogv2' in opts:
479 new_header = REVLOGV2
486 new_header = REVLOGV2
480 elif b'revlogv1' in opts:
487 elif b'revlogv1' in opts:
481 new_header = REVLOGV1 | FLAG_INLINE_DATA
488 new_header = REVLOGV1 | FLAG_INLINE_DATA
482 if b'generaldelta' in opts:
489 if b'generaldelta' in opts:
483 new_header |= FLAG_GENERALDELTA
490 new_header |= FLAG_GENERALDELTA
484 elif b'revlogv0' in self.opener.options:
491 elif b'revlogv0' in self.opener.options:
485 new_header = REVLOGV0
492 new_header = REVLOGV0
486 else:
493 else:
487 new_header = REVLOG_DEFAULT_VERSION
494 new_header = REVLOG_DEFAULT_VERSION
488
495
489 if b'chunkcachesize' in opts:
496 if b'chunkcachesize' in opts:
490 self._chunkcachesize = opts[b'chunkcachesize']
497 self._chunkcachesize = opts[b'chunkcachesize']
491 if b'maxchainlen' in opts:
498 if b'maxchainlen' in opts:
492 self._maxchainlen = opts[b'maxchainlen']
499 self._maxchainlen = opts[b'maxchainlen']
493 if b'deltabothparents' in opts:
500 if b'deltabothparents' in opts:
494 self._deltabothparents = opts[b'deltabothparents']
501 self._deltabothparents = opts[b'deltabothparents']
495 self._lazydelta = bool(opts.get(b'lazydelta', True))
502 self._lazydelta = bool(opts.get(b'lazydelta', True))
496 self._lazydeltabase = False
503 self._lazydeltabase = False
497 if self._lazydelta:
504 if self._lazydelta:
498 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
505 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
499 if b'compengine' in opts:
506 if b'compengine' in opts:
500 self._compengine = opts[b'compengine']
507 self._compengine = opts[b'compengine']
501 if b'zlib.level' in opts:
508 if b'zlib.level' in opts:
502 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
509 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
503 if b'zstd.level' in opts:
510 if b'zstd.level' in opts:
504 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
511 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
505 if b'maxdeltachainspan' in opts:
512 if b'maxdeltachainspan' in opts:
506 self._maxdeltachainspan = opts[b'maxdeltachainspan']
513 self._maxdeltachainspan = opts[b'maxdeltachainspan']
507 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
514 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
508 mmapindexthreshold = opts[b'mmapindexthreshold']
515 mmapindexthreshold = opts[b'mmapindexthreshold']
509 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
516 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
510 withsparseread = bool(opts.get(b'with-sparse-read', False))
517 withsparseread = bool(opts.get(b'with-sparse-read', False))
511 # sparse-revlog forces sparse-read
518 # sparse-revlog forces sparse-read
512 self._withsparseread = self._sparserevlog or withsparseread
519 self._withsparseread = self._sparserevlog or withsparseread
513 if b'sparse-read-density-threshold' in opts:
520 if b'sparse-read-density-threshold' in opts:
514 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
521 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
515 if b'sparse-read-min-gap-size' in opts:
522 if b'sparse-read-min-gap-size' in opts:
516 self._srmingapsize = opts[b'sparse-read-min-gap-size']
523 self._srmingapsize = opts[b'sparse-read-min-gap-size']
517 if opts.get(b'enableellipsis'):
524 if opts.get(b'enableellipsis'):
518 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
525 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
519
526
520 # revlog v0 doesn't have flag processors
527 # revlog v0 doesn't have flag processors
521 for flag, processor in pycompat.iteritems(
528 for flag, processor in pycompat.iteritems(
522 opts.get(b'flagprocessors', {})
529 opts.get(b'flagprocessors', {})
523 ):
530 ):
524 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
531 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
525
532
526 if self._chunkcachesize <= 0:
533 if self._chunkcachesize <= 0:
527 raise error.RevlogError(
534 raise error.RevlogError(
528 _(b'revlog chunk cache size %r is not greater than 0')
535 _(b'revlog chunk cache size %r is not greater than 0')
529 % self._chunkcachesize
536 % self._chunkcachesize
530 )
537 )
531 elif self._chunkcachesize & (self._chunkcachesize - 1):
538 elif self._chunkcachesize & (self._chunkcachesize - 1):
532 raise error.RevlogError(
539 raise error.RevlogError(
533 _(b'revlog chunk cache size %r is not a power of 2')
540 _(b'revlog chunk cache size %r is not a power of 2')
534 % self._chunkcachesize
541 % self._chunkcachesize
535 )
542 )
536 force_nodemap = opts.get(b'devel-force-nodemap', False)
543 force_nodemap = opts.get(b'devel-force-nodemap', False)
537 return new_header, mmapindexthreshold, force_nodemap
544 return new_header, mmapindexthreshold, force_nodemap
538
545
539 def _get_data(self, filepath, mmap_threshold, size=None):
546 def _get_data(self, filepath, mmap_threshold, size=None):
540 """return a file content with or without mmap
547 """return a file content with or without mmap
541
548
542 If the file is missing return the empty string"""
549 If the file is missing return the empty string"""
543 try:
550 try:
544 with self.opener(filepath) as fp:
551 with self.opener(filepath) as fp:
545 if mmap_threshold is not None:
552 if mmap_threshold is not None:
546 file_size = self.opener.fstat(fp).st_size
553 file_size = self.opener.fstat(fp).st_size
547 if file_size >= mmap_threshold:
554 if file_size >= mmap_threshold:
548 if size is not None:
555 if size is not None:
549 # avoid potentiel mmap crash
556 # avoid potentiel mmap crash
550 size = min(file_size, size)
557 size = min(file_size, size)
551 # TODO: should .close() to release resources without
558 # TODO: should .close() to release resources without
552 # relying on Python GC
559 # relying on Python GC
553 if size is None:
560 if size is None:
554 return util.buffer(util.mmapread(fp))
561 return util.buffer(util.mmapread(fp))
555 else:
562 else:
556 return util.buffer(util.mmapread(fp, size))
563 return util.buffer(util.mmapread(fp, size))
557 if size is None:
564 if size is None:
558 return fp.read()
565 return fp.read()
559 else:
566 else:
560 return fp.read(size)
567 return fp.read(size)
561 except IOError as inst:
568 except IOError as inst:
562 if inst.errno != errno.ENOENT:
569 if inst.errno != errno.ENOENT:
563 raise
570 raise
564 return b''
571 return b''
565
572
566 def _loadindex(self):
573 def _loadindex(self):
567
574
568 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
575 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
569
576
570 if self.postfix is not None:
577 if self.postfix is not None:
571 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
578 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
572 elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
579 elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
573 entry_point = b'%s.i.a' % self.radix
580 entry_point = b'%s.i.a' % self.radix
574 else:
581 else:
575 entry_point = b'%s.i' % self.radix
582 entry_point = b'%s.i' % self.radix
576
583
577 entry_data = b''
584 entry_data = b''
578 self._initempty = True
585 self._initempty = True
579 entry_data = self._get_data(entry_point, mmapindexthreshold)
586 entry_data = self._get_data(entry_point, mmapindexthreshold)
580 if len(entry_data) > 0:
587 if len(entry_data) > 0:
581 header = INDEX_HEADER.unpack(entry_data[:4])[0]
588 header = INDEX_HEADER.unpack(entry_data[:4])[0]
582 self._initempty = False
589 self._initempty = False
583 else:
590 else:
584 header = new_header
591 header = new_header
585
592
586 self._format_flags = header & ~0xFFFF
593 self._format_flags = header & ~0xFFFF
587 self._format_version = header & 0xFFFF
594 self._format_version = header & 0xFFFF
588
595
589 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
596 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
590 if supported_flags is None:
597 if supported_flags is None:
591 msg = _(b'unknown version (%d) in revlog %s')
598 msg = _(b'unknown version (%d) in revlog %s')
592 msg %= (self._format_version, self.display_id)
599 msg %= (self._format_version, self.display_id)
593 raise error.RevlogError(msg)
600 raise error.RevlogError(msg)
594 elif self._format_flags & ~supported_flags:
601 elif self._format_flags & ~supported_flags:
595 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
602 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
596 display_flag = self._format_flags >> 16
603 display_flag = self._format_flags >> 16
597 msg %= (display_flag, self._format_version, self.display_id)
604 msg %= (display_flag, self._format_version, self.display_id)
598 raise error.RevlogError(msg)
605 raise error.RevlogError(msg)
599
606
600 features = FEATURES_BY_VERSION[self._format_version]
607 features = FEATURES_BY_VERSION[self._format_version]
601 self._inline = features[b'inline'](self._format_flags)
608 self._inline = features[b'inline'](self._format_flags)
602 self._generaldelta = features[b'generaldelta'](self._format_flags)
609 self._generaldelta = features[b'generaldelta'](self._format_flags)
603 self.hassidedata = features[b'sidedata']
610 self.hassidedata = features[b'sidedata']
604
611
605 if not features[b'docket']:
612 if not features[b'docket']:
606 self._indexfile = entry_point
613 self._indexfile = entry_point
607 index_data = entry_data
614 index_data = entry_data
608 else:
615 else:
609 self._docket_file = entry_point
616 self._docket_file = entry_point
610 if self._initempty:
617 if self._initempty:
611 self._docket = docketutil.default_docket(self, header)
618 self._docket = docketutil.default_docket(self, header)
612 else:
619 else:
613 self._docket = docketutil.parse_docket(
620 self._docket = docketutil.parse_docket(
614 self, entry_data, use_pending=self._trypending
621 self, entry_data, use_pending=self._trypending
615 )
622 )
616 self._indexfile = self._docket.index_filepath()
623 self._indexfile = self._docket.index_filepath()
617 index_data = b''
624 index_data = b''
618 index_size = self._docket.index_end
625 index_size = self._docket.index_end
619 if index_size > 0:
626 if index_size > 0:
620 index_data = self._get_data(
627 index_data = self._get_data(
621 self._indexfile, mmapindexthreshold, size=index_size
628 self._indexfile, mmapindexthreshold, size=index_size
622 )
629 )
623 if len(index_data) < index_size:
630 if len(index_data) < index_size:
624 msg = _(b'too few index data for %s: got %d, expected %d')
631 msg = _(b'too few index data for %s: got %d, expected %d')
625 msg %= (self.display_id, len(index_data), index_size)
632 msg %= (self.display_id, len(index_data), index_size)
626 raise error.RevlogError(msg)
633 raise error.RevlogError(msg)
627
634
628 self._inline = False
635 self._inline = False
629 # generaldelta implied by version 2 revlogs.
636 # generaldelta implied by version 2 revlogs.
630 self._generaldelta = True
637 self._generaldelta = True
631 # the logic for persistent nodemap will be dealt with within the
638 # the logic for persistent nodemap will be dealt with within the
632 # main docket, so disable it for now.
639 # main docket, so disable it for now.
633 self._nodemap_file = None
640 self._nodemap_file = None
634
641
635 if self._docket is not None:
642 if self._docket is not None:
636 self._datafile = self._docket.data_filepath()
643 self._datafile = self._docket.data_filepath()
644 self._sidedatafile = self._docket.sidedata_filepath()
637 elif self.postfix is None:
645 elif self.postfix is None:
638 self._datafile = b'%s.d' % self.radix
646 self._datafile = b'%s.d' % self.radix
639 else:
647 else:
640 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
648 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
641
649
642 self.nodeconstants = sha1nodeconstants
650 self.nodeconstants = sha1nodeconstants
643 self.nullid = self.nodeconstants.nullid
651 self.nullid = self.nodeconstants.nullid
644
652
645 # sparse-revlog can't be on without general-delta (issue6056)
653 # sparse-revlog can't be on without general-delta (issue6056)
646 if not self._generaldelta:
654 if not self._generaldelta:
647 self._sparserevlog = False
655 self._sparserevlog = False
648
656
649 self._storedeltachains = True
657 self._storedeltachains = True
650
658
651 devel_nodemap = (
659 devel_nodemap = (
652 self._nodemap_file
660 self._nodemap_file
653 and force_nodemap
661 and force_nodemap
654 and parse_index_v1_nodemap is not None
662 and parse_index_v1_nodemap is not None
655 )
663 )
656
664
657 use_rust_index = False
665 use_rust_index = False
658 if rustrevlog is not None:
666 if rustrevlog is not None:
659 if self._nodemap_file is not None:
667 if self._nodemap_file is not None:
660 use_rust_index = True
668 use_rust_index = True
661 else:
669 else:
662 use_rust_index = self.opener.options.get(b'rust.index')
670 use_rust_index = self.opener.options.get(b'rust.index')
663
671
664 self._parse_index = parse_index_v1
672 self._parse_index = parse_index_v1
665 if self._format_version == REVLOGV0:
673 if self._format_version == REVLOGV0:
666 self._parse_index = revlogv0.parse_index_v0
674 self._parse_index = revlogv0.parse_index_v0
667 elif self._format_version == REVLOGV2:
675 elif self._format_version == REVLOGV2:
668 self._parse_index = parse_index_v2
676 self._parse_index = parse_index_v2
669 elif self._format_version == CHANGELOGV2:
677 elif self._format_version == CHANGELOGV2:
670 self._parse_index = parse_index_cl_v2
678 self._parse_index = parse_index_cl_v2
671 elif devel_nodemap:
679 elif devel_nodemap:
672 self._parse_index = parse_index_v1_nodemap
680 self._parse_index = parse_index_v1_nodemap
673 elif use_rust_index:
681 elif use_rust_index:
674 self._parse_index = parse_index_v1_mixed
682 self._parse_index = parse_index_v1_mixed
675 try:
683 try:
676 d = self._parse_index(index_data, self._inline)
684 d = self._parse_index(index_data, self._inline)
677 index, _chunkcache = d
685 index, _chunkcache = d
678 use_nodemap = (
686 use_nodemap = (
679 not self._inline
687 not self._inline
680 and self._nodemap_file is not None
688 and self._nodemap_file is not None
681 and util.safehasattr(index, 'update_nodemap_data')
689 and util.safehasattr(index, 'update_nodemap_data')
682 )
690 )
683 if use_nodemap:
691 if use_nodemap:
684 nodemap_data = nodemaputil.persisted_data(self)
692 nodemap_data = nodemaputil.persisted_data(self)
685 if nodemap_data is not None:
693 if nodemap_data is not None:
686 docket = nodemap_data[0]
694 docket = nodemap_data[0]
687 if (
695 if (
688 len(d[0]) > docket.tip_rev
696 len(d[0]) > docket.tip_rev
689 and d[0][docket.tip_rev][7] == docket.tip_node
697 and d[0][docket.tip_rev][7] == docket.tip_node
690 ):
698 ):
691 # no changelog tampering
699 # no changelog tampering
692 self._nodemap_docket = docket
700 self._nodemap_docket = docket
693 index.update_nodemap_data(*nodemap_data)
701 index.update_nodemap_data(*nodemap_data)
694 except (ValueError, IndexError):
702 except (ValueError, IndexError):
695 raise error.RevlogError(
703 raise error.RevlogError(
696 _(b"index %s is corrupted") % self.display_id
704 _(b"index %s is corrupted") % self.display_id
697 )
705 )
698 self.index, self._chunkcache = d
706 self.index, self._chunkcache = d
699 if not self._chunkcache:
707 if not self._chunkcache:
700 self._chunkclear()
708 self._chunkclear()
701 # revnum -> (chain-length, sum-delta-length)
709 # revnum -> (chain-length, sum-delta-length)
702 self._chaininfocache = util.lrucachedict(500)
710 self._chaininfocache = util.lrucachedict(500)
703 # revlog header -> revlog compressor
711 # revlog header -> revlog compressor
704 self._decompressors = {}
712 self._decompressors = {}
705
713
706 @util.propertycache
714 @util.propertycache
707 def revlog_kind(self):
715 def revlog_kind(self):
708 return self.target[0]
716 return self.target[0]
709
717
710 @util.propertycache
718 @util.propertycache
711 def display_id(self):
719 def display_id(self):
712 """The public facing "ID" of the revlog that we use in message"""
720 """The public facing "ID" of the revlog that we use in message"""
713 # Maybe we should build a user facing representation of
721 # Maybe we should build a user facing representation of
714 # revlog.target instead of using `self.radix`
722 # revlog.target instead of using `self.radix`
715 return self.radix
723 return self.radix
716
724
717 def _get_decompressor(self, t):
725 def _get_decompressor(self, t):
718 try:
726 try:
719 compressor = self._decompressors[t]
727 compressor = self._decompressors[t]
720 except KeyError:
728 except KeyError:
721 try:
729 try:
722 engine = util.compengines.forrevlogheader(t)
730 engine = util.compengines.forrevlogheader(t)
723 compressor = engine.revlogcompressor(self._compengineopts)
731 compressor = engine.revlogcompressor(self._compengineopts)
724 self._decompressors[t] = compressor
732 self._decompressors[t] = compressor
725 except KeyError:
733 except KeyError:
726 raise error.RevlogError(
734 raise error.RevlogError(
727 _(b'unknown compression type %s') % binascii.hexlify(t)
735 _(b'unknown compression type %s') % binascii.hexlify(t)
728 )
736 )
729 return compressor
737 return compressor
730
738
731 @util.propertycache
739 @util.propertycache
732 def _compressor(self):
740 def _compressor(self):
733 engine = util.compengines[self._compengine]
741 engine = util.compengines[self._compengine]
734 return engine.revlogcompressor(self._compengineopts)
742 return engine.revlogcompressor(self._compengineopts)
735
743
736 @util.propertycache
744 @util.propertycache
737 def _decompressor(self):
745 def _decompressor(self):
738 """the default decompressor"""
746 """the default decompressor"""
739 if self._docket is None:
747 if self._docket is None:
740 return None
748 return None
741 t = self._docket.default_compression_header
749 t = self._docket.default_compression_header
742 c = self._get_decompressor(t)
750 c = self._get_decompressor(t)
743 return c.decompress
751 return c.decompress
744
752
745 def _indexfp(self):
753 def _indexfp(self):
746 """file object for the revlog's index file"""
754 """file object for the revlog's index file"""
747 return self.opener(self._indexfile, mode=b"r")
755 return self.opener(self._indexfile, mode=b"r")
748
756
749 def __index_write_fp(self):
757 def __index_write_fp(self):
750 # You should not use this directly and use `_writing` instead
758 # You should not use this directly and use `_writing` instead
751 try:
759 try:
752 f = self.opener(
760 f = self.opener(
753 self._indexfile, mode=b"r+", checkambig=self._checkambig
761 self._indexfile, mode=b"r+", checkambig=self._checkambig
754 )
762 )
755 if self._docket is None:
763 if self._docket is None:
756 f.seek(0, os.SEEK_END)
764 f.seek(0, os.SEEK_END)
757 else:
765 else:
758 f.seek(self._docket.index_end, os.SEEK_SET)
766 f.seek(self._docket.index_end, os.SEEK_SET)
759 return f
767 return f
760 except IOError as inst:
768 except IOError as inst:
761 if inst.errno != errno.ENOENT:
769 if inst.errno != errno.ENOENT:
762 raise
770 raise
763 return self.opener(
771 return self.opener(
764 self._indexfile, mode=b"w+", checkambig=self._checkambig
772 self._indexfile, mode=b"w+", checkambig=self._checkambig
765 )
773 )
766
774
767 def __index_new_fp(self):
775 def __index_new_fp(self):
768 # You should not use this unless you are upgrading from inline revlog
776 # You should not use this unless you are upgrading from inline revlog
769 return self.opener(
777 return self.opener(
770 self._indexfile,
778 self._indexfile,
771 mode=b"w",
779 mode=b"w",
772 checkambig=self._checkambig,
780 checkambig=self._checkambig,
773 atomictemp=True,
781 atomictemp=True,
774 )
782 )
775
783
776 def _datafp(self, mode=b'r'):
784 def _datafp(self, mode=b'r'):
777 """file object for the revlog's data file"""
785 """file object for the revlog's data file"""
778 return self.opener(self._datafile, mode=mode)
786 return self.opener(self._datafile, mode=mode)
779
787
780 @contextlib.contextmanager
788 @contextlib.contextmanager
781 def _datareadfp(self, existingfp=None):
789 def _datareadfp(self, existingfp=None):
782 """file object suitable to read data"""
790 """file object suitable to read data"""
783 # Use explicit file handle, if given.
791 # Use explicit file handle, if given.
784 if existingfp is not None:
792 if existingfp is not None:
785 yield existingfp
793 yield existingfp
786
794
787 # Use a file handle being actively used for writes, if available.
795 # Use a file handle being actively used for writes, if available.
788 # There is some danger to doing this because reads will seek the
796 # There is some danger to doing this because reads will seek the
789 # file. However, _writeentry() performs a SEEK_END before all writes,
797 # file. However, _writeentry() performs a SEEK_END before all writes,
790 # so we should be safe.
798 # so we should be safe.
791 elif self._writinghandles:
799 elif self._writinghandles:
792 if self._inline:
800 if self._inline:
793 yield self._writinghandles[0]
801 yield self._writinghandles[0]
794 else:
802 else:
795 yield self._writinghandles[1]
803 yield self._writinghandles[1]
796
804
797 # Otherwise open a new file handle.
805 # Otherwise open a new file handle.
798 else:
806 else:
799 if self._inline:
807 if self._inline:
800 func = self._indexfp
808 func = self._indexfp
801 else:
809 else:
802 func = self._datafp
810 func = self._datafp
803 with func() as fp:
811 with func() as fp:
804 yield fp
812 yield fp
805
813
814 @contextlib.contextmanager
806 def _sidedatareadfp(self):
815 def _sidedatareadfp(self):
807 """file object suitable to read sidedata"""
816 """file object suitable to read sidedata"""
808 return self._datareadfp()
817 if self._writinghandles:
818 yield self._writinghandles[2]
819 else:
820 with self.opener(self._sidedatafile) as fp:
821 yield fp
809
822
810 def tiprev(self):
823 def tiprev(self):
811 return len(self.index) - 1
824 return len(self.index) - 1
812
825
813 def tip(self):
826 def tip(self):
814 return self.node(self.tiprev())
827 return self.node(self.tiprev())
815
828
816 def __contains__(self, rev):
829 def __contains__(self, rev):
817 return 0 <= rev < len(self)
830 return 0 <= rev < len(self)
818
831
819 def __len__(self):
832 def __len__(self):
820 return len(self.index)
833 return len(self.index)
821
834
822 def __iter__(self):
835 def __iter__(self):
823 return iter(pycompat.xrange(len(self)))
836 return iter(pycompat.xrange(len(self)))
824
837
825 def revs(self, start=0, stop=None):
838 def revs(self, start=0, stop=None):
826 """iterate over all rev in this revlog (from start to stop)"""
839 """iterate over all rev in this revlog (from start to stop)"""
827 return storageutil.iterrevs(len(self), start=start, stop=stop)
840 return storageutil.iterrevs(len(self), start=start, stop=stop)
828
841
829 @property
842 @property
830 def nodemap(self):
843 def nodemap(self):
831 msg = (
844 msg = (
832 b"revlog.nodemap is deprecated, "
845 b"revlog.nodemap is deprecated, "
833 b"use revlog.index.[has_node|rev|get_rev]"
846 b"use revlog.index.[has_node|rev|get_rev]"
834 )
847 )
835 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
848 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
836 return self.index.nodemap
849 return self.index.nodemap
837
850
838 @property
851 @property
839 def _nodecache(self):
852 def _nodecache(self):
840 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
853 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
841 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
854 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
842 return self.index.nodemap
855 return self.index.nodemap
843
856
844 def hasnode(self, node):
857 def hasnode(self, node):
845 try:
858 try:
846 self.rev(node)
859 self.rev(node)
847 return True
860 return True
848 except KeyError:
861 except KeyError:
849 return False
862 return False
850
863
851 def candelta(self, baserev, rev):
864 def candelta(self, baserev, rev):
852 """whether two revisions (baserev, rev) can be delta-ed or not"""
865 """whether two revisions (baserev, rev) can be delta-ed or not"""
853 # Disable delta if either rev requires a content-changing flag
866 # Disable delta if either rev requires a content-changing flag
854 # processor (ex. LFS). This is because such flag processor can alter
867 # processor (ex. LFS). This is because such flag processor can alter
855 # the rawtext content that the delta will be based on, and two clients
868 # the rawtext content that the delta will be based on, and two clients
856 # could have a same revlog node with different flags (i.e. different
869 # could have a same revlog node with different flags (i.e. different
857 # rawtext contents) and the delta could be incompatible.
870 # rawtext contents) and the delta could be incompatible.
858 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
871 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
859 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
872 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
860 ):
873 ):
861 return False
874 return False
862 return True
875 return True
863
876
864 def update_caches(self, transaction):
877 def update_caches(self, transaction):
865 if self._nodemap_file is not None:
878 if self._nodemap_file is not None:
866 if transaction is None:
879 if transaction is None:
867 nodemaputil.update_persistent_nodemap(self)
880 nodemaputil.update_persistent_nodemap(self)
868 else:
881 else:
869 nodemaputil.setup_persistent_nodemap(transaction, self)
882 nodemaputil.setup_persistent_nodemap(transaction, self)
870
883
    def clearcaches(self):
        """Drop every in-memory cache derived from the on-disk data."""
        self._revisioncache = None
        self._chainbasecache.clear()
        self._chunkcache = (0, b'')
        self._pcache = {}
        self._nodemap_docket = None
        self.index.clearcaches()
        # The python code is the one responsible for validating the docket, we
        # end up having to refresh it here.
        use_nodemap = (
            not self._inline
            and self._nodemap_file is not None
            and util.safehasattr(self.index, 'update_nodemap_data')
        )
        if use_nodemap:
            nodemap_data = nodemaputil.persisted_data(self)
            if nodemap_data is not None:
                self._nodemap_docket = nodemap_data[0]
                self.index.update_nodemap_data(*nodemap_data)
890
903
891 def rev(self, node):
904 def rev(self, node):
892 try:
905 try:
893 return self.index.rev(node)
906 return self.index.rev(node)
894 except TypeError:
907 except TypeError:
895 raise
908 raise
896 except error.RevlogError:
909 except error.RevlogError:
897 # parsers.c radix tree lookup failed
910 # parsers.c radix tree lookup failed
898 if (
911 if (
899 node == self.nodeconstants.wdirid
912 node == self.nodeconstants.wdirid
900 or node in self.nodeconstants.wdirfilenodeids
913 or node in self.nodeconstants.wdirfilenodeids
901 ):
914 ):
902 raise error.WdirUnsupported
915 raise error.WdirUnsupported
903 raise error.LookupError(node, self.display_id, _(b'no node'))
916 raise error.LookupError(node, self.display_id, _(b'no node'))
904
917
905 # Accessors for index entries.
918 # Accessors for index entries.
906
919
907 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
920 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
908 # are flags.
921 # are flags.
909 def start(self, rev):
922 def start(self, rev):
910 return int(self.index[rev][0] >> 16)
923 return int(self.index[rev][0] >> 16)
911
924
925 def sidedata_cut_off(self, rev):
926 sd_cut_off = self.index[rev][8]
927 if sd_cut_off != 0:
928 return sd_cut_off
929 # This is some annoying dance, because entries without sidedata
930 # currently use 0 as their ofsset. (instead of previous-offset +
931 # previous-size)
932 #
933 # We should reconsider this sidedata β†’ 0 sidata_offset policy.
934 # In the meantime, we need this.
935 while 0 <= rev:
936 e = self.index[rev]
937 if e[9] != 0:
938 return e[8] + e[9]
939 rev -= 1
940 return 0
941
912 def flags(self, rev):
942 def flags(self, rev):
913 return self.index[rev][0] & 0xFFFF
943 return self.index[rev][0] & 0xFFFF
914
944
915 def length(self, rev):
945 def length(self, rev):
916 return self.index[rev][1]
946 return self.index[rev][1]
917
947
918 def sidedata_length(self, rev):
948 def sidedata_length(self, rev):
919 if not self.hassidedata:
949 if not self.hassidedata:
920 return 0
950 return 0
921 return self.index[rev][9]
951 return self.index[rev][9]
922
952
923 def rawsize(self, rev):
953 def rawsize(self, rev):
924 """return the length of the uncompressed text for a given revision"""
954 """return the length of the uncompressed text for a given revision"""
925 l = self.index[rev][2]
955 l = self.index[rev][2]
926 if l >= 0:
956 if l >= 0:
927 return l
957 return l
928
958
929 t = self.rawdata(rev)
959 t = self.rawdata(rev)
930 return len(t)
960 return len(t)
931
961
932 def size(self, rev):
962 def size(self, rev):
933 """length of non-raw text (processed by a "read" flag processor)"""
963 """length of non-raw text (processed by a "read" flag processor)"""
934 # fast path: if no "read" flag processor could change the content,
964 # fast path: if no "read" flag processor could change the content,
935 # size is rawsize. note: ELLIPSIS is known to not change the content.
965 # size is rawsize. note: ELLIPSIS is known to not change the content.
936 flags = self.flags(rev)
966 flags = self.flags(rev)
937 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
967 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
938 return self.rawsize(rev)
968 return self.rawsize(rev)
939
969
940 return len(self.revision(rev, raw=False))
970 return len(self.revision(rev, raw=False))
941
971
942 def chainbase(self, rev):
972 def chainbase(self, rev):
943 base = self._chainbasecache.get(rev)
973 base = self._chainbasecache.get(rev)
944 if base is not None:
974 if base is not None:
945 return base
975 return base
946
976
947 index = self.index
977 index = self.index
948 iterrev = rev
978 iterrev = rev
949 base = index[iterrev][3]
979 base = index[iterrev][3]
950 while base != iterrev:
980 while base != iterrev:
951 iterrev = base
981 iterrev = base
952 base = index[iterrev][3]
982 base = index[iterrev][3]
953
983
954 self._chainbasecache[rev] = base
984 self._chainbasecache[rev] = base
955 return base
985 return base
956
986
957 def linkrev(self, rev):
987 def linkrev(self, rev):
958 return self.index[rev][4]
988 return self.index[rev][4]
959
989
960 def parentrevs(self, rev):
990 def parentrevs(self, rev):
961 try:
991 try:
962 entry = self.index[rev]
992 entry = self.index[rev]
963 except IndexError:
993 except IndexError:
964 if rev == wdirrev:
994 if rev == wdirrev:
965 raise error.WdirUnsupported
995 raise error.WdirUnsupported
966 raise
996 raise
967 if entry[5] == nullrev:
997 if entry[5] == nullrev:
968 return entry[6], entry[5]
998 return entry[6], entry[5]
969 else:
999 else:
970 return entry[5], entry[6]
1000 return entry[5], entry[6]
971
1001
972 # fast parentrevs(rev) where rev isn't filtered
1002 # fast parentrevs(rev) where rev isn't filtered
973 _uncheckedparentrevs = parentrevs
1003 _uncheckedparentrevs = parentrevs
974
1004
    def node(self, rev):
        """Return the node id stored in the index for ``rev``.

        The working directory pseudo revision is reported via
        ``WdirUnsupported``; other out-of-range revisions raise IndexError.
        """
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise
982
1012
983 # Derived from index values.
1013 # Derived from index values.
984
1014
    def end(self, rev):
        """Return the offset of the first byte past ``rev``'s data chunk."""
        return self.start(rev) + self.length(rev)
987
1017
988 def parents(self, node):
1018 def parents(self, node):
989 i = self.index
1019 i = self.index
990 d = i[self.rev(node)]
1020 d = i[self.rev(node)]
991 # inline node() to avoid function call overhead
1021 # inline node() to avoid function call overhead
992 if d[5] == self.nullid:
1022 if d[5] == self.nullid:
993 return i[d[6]][7], i[d[5]][7]
1023 return i[d[6]][7], i[d[5]][7]
994 else:
1024 else:
995 return i[d[5]][7], i[d[6]][7]
1025 return i[d[5]][7], i[d[6]][7]
996
1026
997 def chainlen(self, rev):
1027 def chainlen(self, rev):
998 return self._chaininfo(rev)[0]
1028 return self._chaininfo(rev)[0]
999
1029
    def _chaininfo(self, rev):
        """Return ``(chainlen, compresseddeltalen)`` for ``rev``.

        ``chainlen`` is the number of deltas applied on top of the chain
        base; ``compresseddeltalen`` is the total on-disk size of the chain,
        including the base text.  Results are memoized in
        ``self._chaininfocache``, and a cached prefix short-circuits the
        walk.
        """
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        # Walk towards the chain base (delta-base field with generaldelta,
        # previous revision otherwise) until the base points at itself.
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r
1059 return r
1030
1060
1031 def _deltachain(self, rev, stoprev=None):
1061 def _deltachain(self, rev, stoprev=None):
1032 """Obtain the delta chain for a revision.
1062 """Obtain the delta chain for a revision.
1033
1063
1034 ``stoprev`` specifies a revision to stop at. If not specified, we
1064 ``stoprev`` specifies a revision to stop at. If not specified, we
1035 stop at the base of the chain.
1065 stop at the base of the chain.
1036
1066
1037 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
1067 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
1038 revs in ascending order and ``stopped`` is a bool indicating whether
1068 revs in ascending order and ``stopped`` is a bool indicating whether
1039 ``stoprev`` was hit.
1069 ``stoprev`` was hit.
1040 """
1070 """
1041 # Try C implementation.
1071 # Try C implementation.
1042 try:
1072 try:
1043 return self.index.deltachain(rev, stoprev, self._generaldelta)
1073 return self.index.deltachain(rev, stoprev, self._generaldelta)
1044 except AttributeError:
1074 except AttributeError:
1045 pass
1075 pass
1046
1076
1047 chain = []
1077 chain = []
1048
1078
1049 # Alias to prevent attribute lookup in tight loop.
1079 # Alias to prevent attribute lookup in tight loop.
1050 index = self.index
1080 index = self.index
1051 generaldelta = self._generaldelta
1081 generaldelta = self._generaldelta
1052
1082
1053 iterrev = rev
1083 iterrev = rev
1054 e = index[iterrev]
1084 e = index[iterrev]
1055 while iterrev != e[3] and iterrev != stoprev:
1085 while iterrev != e[3] and iterrev != stoprev:
1056 chain.append(iterrev)
1086 chain.append(iterrev)
1057 if generaldelta:
1087 if generaldelta:
1058 iterrev = e[3]
1088 iterrev = e[3]
1059 else:
1089 else:
1060 iterrev -= 1
1090 iterrev -= 1
1061 e = index[iterrev]
1091 e = index[iterrev]
1062
1092
1063 if iterrev == stoprev:
1093 if iterrev == stoprev:
1064 stopped = True
1094 stopped = True
1065 else:
1095 else:
1066 chain.append(iterrev)
1096 chain.append(iterrev)
1067 stopped = False
1097 stopped = False
1068
1098
1069 chain.reverse()
1099 chain.reverse()
1070 return chain, stopped
1100 return chain, stopped
1071
1101
1072 def ancestors(self, revs, stoprev=0, inclusive=False):
1102 def ancestors(self, revs, stoprev=0, inclusive=False):
1073 """Generate the ancestors of 'revs' in reverse revision order.
1103 """Generate the ancestors of 'revs' in reverse revision order.
1074 Does not generate revs lower than stoprev.
1104 Does not generate revs lower than stoprev.
1075
1105
1076 See the documentation for ancestor.lazyancestors for more details."""
1106 See the documentation for ancestor.lazyancestors for more details."""
1077
1107
1078 # first, make sure start revisions aren't filtered
1108 # first, make sure start revisions aren't filtered
1079 revs = list(revs)
1109 revs = list(revs)
1080 checkrev = self.node
1110 checkrev = self.node
1081 for r in revs:
1111 for r in revs:
1082 checkrev(r)
1112 checkrev(r)
1083 # and we're sure ancestors aren't filtered as well
1113 # and we're sure ancestors aren't filtered as well
1084
1114
1085 if rustancestor is not None and self.index.rust_ext_compat:
1115 if rustancestor is not None and self.index.rust_ext_compat:
1086 lazyancestors = rustancestor.LazyAncestors
1116 lazyancestors = rustancestor.LazyAncestors
1087 arg = self.index
1117 arg = self.index
1088 else:
1118 else:
1089 lazyancestors = ancestor.lazyancestors
1119 lazyancestors = ancestor.lazyancestors
1090 arg = self._uncheckedparentrevs
1120 arg = self._uncheckedparentrevs
1091 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1121 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1092
1122
    def descendants(self, revs):
        """Generate the descendant revisions of ``revs`` (delegates to
        dagop.descendantrevs)."""
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1095
1125
    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

        ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [self.nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            # Set-like view over a lazily-iterated base sequence plus
            # explicitly added values; avoids materializing the full
            # ancestor set up front.
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if not r in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]
1188 return has, [self.node(miss) for miss in missing]
1159
1189
    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        # Prefer the Rust implementation when available and index-compatible.
        if rustancestor is not None and self.index.rust_ext_compat:
            return rustancestor.MissingAncestors(self.index, common)
        return ancestor.incrementalmissingancestors(self.parentrevs, common)
1175
1205
1176 def findmissingrevs(self, common=None, heads=None):
1206 def findmissingrevs(self, common=None, heads=None):
1177 """Return the revision numbers of the ancestors of heads that
1207 """Return the revision numbers of the ancestors of heads that
1178 are not ancestors of common.
1208 are not ancestors of common.
1179
1209
1180 More specifically, return a list of revision numbers corresponding to
1210 More specifically, return a list of revision numbers corresponding to
1181 nodes N such that every N satisfies the following constraints:
1211 nodes N such that every N satisfies the following constraints:
1182
1212
1183 1. N is an ancestor of some node in 'heads'
1213 1. N is an ancestor of some node in 'heads'
1184 2. N is not an ancestor of any node in 'common'
1214 2. N is not an ancestor of any node in 'common'
1185
1215
1186 The list is sorted by revision number, meaning it is
1216 The list is sorted by revision number, meaning it is
1187 topologically sorted.
1217 topologically sorted.
1188
1218
1189 'heads' and 'common' are both lists of revision numbers. If heads is
1219 'heads' and 'common' are both lists of revision numbers. If heads is
1190 not supplied, uses all of the revlog's heads. If common is not
1220 not supplied, uses all of the revlog's heads. If common is not
1191 supplied, uses nullid."""
1221 supplied, uses nullid."""
1192 if common is None:
1222 if common is None:
1193 common = [nullrev]
1223 common = [nullrev]
1194 if heads is None:
1224 if heads is None:
1195 heads = self.headrevs()
1225 heads = self.headrevs()
1196
1226
1197 inc = self.incrementalmissingrevs(common=common)
1227 inc = self.incrementalmissingrevs(common=common)
1198 return inc.missingancestors(heads)
1228 return inc.missingancestors(heads)
1199
1229
1200 def findmissing(self, common=None, heads=None):
1230 def findmissing(self, common=None, heads=None):
1201 """Return the ancestors of heads that are not ancestors of common.
1231 """Return the ancestors of heads that are not ancestors of common.
1202
1232
1203 More specifically, return a list of nodes N such that every N
1233 More specifically, return a list of nodes N such that every N
1204 satisfies the following constraints:
1234 satisfies the following constraints:
1205
1235
1206 1. N is an ancestor of some node in 'heads'
1236 1. N is an ancestor of some node in 'heads'
1207 2. N is not an ancestor of any node in 'common'
1237 2. N is not an ancestor of any node in 'common'
1208
1238
1209 The list is sorted by revision number, meaning it is
1239 The list is sorted by revision number, meaning it is
1210 topologically sorted.
1240 topologically sorted.
1211
1241
1212 'heads' and 'common' are both lists of node IDs. If heads is
1242 'heads' and 'common' are both lists of node IDs. If heads is
1213 not supplied, uses all of the revlog's heads. If common is not
1243 not supplied, uses all of the revlog's heads. If common is not
1214 supplied, uses nullid."""
1244 supplied, uses nullid."""
1215 if common is None:
1245 if common is None:
1216 common = [self.nullid]
1246 common = [self.nullid]
1217 if heads is None:
1247 if heads is None:
1218 heads = self.heads()
1248 heads = self.heads()
1219
1249
1220 common = [self.rev(n) for n in common]
1250 common = [self.rev(n) for n in common]
1221 heads = [self.rev(n) for n in heads]
1251 heads = [self.rev(n) for n in heads]
1222
1252
1223 inc = self.incrementalmissingrevs(common=common)
1253 inc = self.incrementalmissingrevs(common=common)
1224 return [self.node(r) for r in inc.missingancestors(heads)]
1254 return [self.node(r) for r in inc.missingancestors(heads)]
1225
1255
1226 def nodesbetween(self, roots=None, heads=None):
1256 def nodesbetween(self, roots=None, heads=None):
1227 """Return a topological path from 'roots' to 'heads'.
1257 """Return a topological path from 'roots' to 'heads'.
1228
1258
1229 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1259 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1230 topologically sorted list of all nodes N that satisfy both of
1260 topologically sorted list of all nodes N that satisfy both of
1231 these constraints:
1261 these constraints:
1232
1262
1233 1. N is a descendant of some node in 'roots'
1263 1. N is a descendant of some node in 'roots'
1234 2. N is an ancestor of some node in 'heads'
1264 2. N is an ancestor of some node in 'heads'
1235
1265
1236 Every node is considered to be both a descendant and an ancestor
1266 Every node is considered to be both a descendant and an ancestor
1237 of itself, so every reachable node in 'roots' and 'heads' will be
1267 of itself, so every reachable node in 'roots' and 'heads' will be
1238 included in 'nodes'.
1268 included in 'nodes'.
1239
1269
1240 'outroots' is the list of reachable nodes in 'roots', i.e., the
1270 'outroots' is the list of reachable nodes in 'roots', i.e., the
1241 subset of 'roots' that is returned in 'nodes'. Likewise,
1271 subset of 'roots' that is returned in 'nodes'. Likewise,
1242 'outheads' is the subset of 'heads' that is also in 'nodes'.
1272 'outheads' is the subset of 'heads' that is also in 'nodes'.
1243
1273
1244 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1274 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1245 unspecified, uses nullid as the only root. If 'heads' is
1275 unspecified, uses nullid as the only root. If 'heads' is
1246 unspecified, uses list of all of the revlog's heads."""
1276 unspecified, uses list of all of the revlog's heads."""
1247 nonodes = ([], [], [])
1277 nonodes = ([], [], [])
1248 if roots is not None:
1278 if roots is not None:
1249 roots = list(roots)
1279 roots = list(roots)
1250 if not roots:
1280 if not roots:
1251 return nonodes
1281 return nonodes
1252 lowestrev = min([self.rev(n) for n in roots])
1282 lowestrev = min([self.rev(n) for n in roots])
1253 else:
1283 else:
1254 roots = [self.nullid] # Everybody's a descendant of nullid
1284 roots = [self.nullid] # Everybody's a descendant of nullid
1255 lowestrev = nullrev
1285 lowestrev = nullrev
1256 if (lowestrev == nullrev) and (heads is None):
1286 if (lowestrev == nullrev) and (heads is None):
1257 # We want _all_ the nodes!
1287 # We want _all_ the nodes!
1258 return (
1288 return (
1259 [self.node(r) for r in self],
1289 [self.node(r) for r in self],
1260 [self.nullid],
1290 [self.nullid],
1261 list(self.heads()),
1291 list(self.heads()),
1262 )
1292 )
1263 if heads is None:
1293 if heads is None:
1264 # All nodes are ancestors, so the latest ancestor is the last
1294 # All nodes are ancestors, so the latest ancestor is the last
1265 # node.
1295 # node.
1266 highestrev = len(self) - 1
1296 highestrev = len(self) - 1
1267 # Set ancestors to None to signal that every node is an ancestor.
1297 # Set ancestors to None to signal that every node is an ancestor.
1268 ancestors = None
1298 ancestors = None
1269 # Set heads to an empty dictionary for later discovery of heads
1299 # Set heads to an empty dictionary for later discovery of heads
1270 heads = {}
1300 heads = {}
1271 else:
1301 else:
1272 heads = list(heads)
1302 heads = list(heads)
1273 if not heads:
1303 if not heads:
1274 return nonodes
1304 return nonodes
1275 ancestors = set()
1305 ancestors = set()
1276 # Turn heads into a dictionary so we can remove 'fake' heads.
1306 # Turn heads into a dictionary so we can remove 'fake' heads.
1277 # Also, later we will be using it to filter out the heads we can't
1307 # Also, later we will be using it to filter out the heads we can't
1278 # find from roots.
1308 # find from roots.
1279 heads = dict.fromkeys(heads, False)
1309 heads = dict.fromkeys(heads, False)
1280 # Start at the top and keep marking parents until we're done.
1310 # Start at the top and keep marking parents until we're done.
1281 nodestotag = set(heads)
1311 nodestotag = set(heads)
1282 # Remember where the top was so we can use it as a limit later.
1312 # Remember where the top was so we can use it as a limit later.
1283 highestrev = max([self.rev(n) for n in nodestotag])
1313 highestrev = max([self.rev(n) for n in nodestotag])
1284 while nodestotag:
1314 while nodestotag:
1285 # grab a node to tag
1315 # grab a node to tag
1286 n = nodestotag.pop()
1316 n = nodestotag.pop()
1287 # Never tag nullid
1317 # Never tag nullid
1288 if n == self.nullid:
1318 if n == self.nullid:
1289 continue
1319 continue
1290 # A node's revision number represents its place in a
1320 # A node's revision number represents its place in a
1291 # topologically sorted list of nodes.
1321 # topologically sorted list of nodes.
1292 r = self.rev(n)
1322 r = self.rev(n)
1293 if r >= lowestrev:
1323 if r >= lowestrev:
1294 if n not in ancestors:
1324 if n not in ancestors:
1295 # If we are possibly a descendant of one of the roots
1325 # If we are possibly a descendant of one of the roots
1296 # and we haven't already been marked as an ancestor
1326 # and we haven't already been marked as an ancestor
1297 ancestors.add(n) # Mark as ancestor
1327 ancestors.add(n) # Mark as ancestor
1298 # Add non-nullid parents to list of nodes to tag.
1328 # Add non-nullid parents to list of nodes to tag.
1299 nodestotag.update(
1329 nodestotag.update(
1300 [p for p in self.parents(n) if p != self.nullid]
1330 [p for p in self.parents(n) if p != self.nullid]
1301 )
1331 )
1302 elif n in heads: # We've seen it before, is it a fake head?
1332 elif n in heads: # We've seen it before, is it a fake head?
1303 # So it is, real heads should not be the ancestors of
1333 # So it is, real heads should not be the ancestors of
1304 # any other heads.
1334 # any other heads.
1305 heads.pop(n)
1335 heads.pop(n)
1306 if not ancestors:
1336 if not ancestors:
1307 return nonodes
1337 return nonodes
1308 # Now that we have our set of ancestors, we want to remove any
1338 # Now that we have our set of ancestors, we want to remove any
1309 # roots that are not ancestors.
1339 # roots that are not ancestors.
1310
1340
1311 # If one of the roots was nullid, everything is included anyway.
1341 # If one of the roots was nullid, everything is included anyway.
1312 if lowestrev > nullrev:
1342 if lowestrev > nullrev:
1313 # But, since we weren't, let's recompute the lowest rev to not
1343 # But, since we weren't, let's recompute the lowest rev to not
1314 # include roots that aren't ancestors.
1344 # include roots that aren't ancestors.
1315
1345
1316 # Filter out roots that aren't ancestors of heads
1346 # Filter out roots that aren't ancestors of heads
1317 roots = [root for root in roots if root in ancestors]
1347 roots = [root for root in roots if root in ancestors]
1318 # Recompute the lowest revision
1348 # Recompute the lowest revision
1319 if roots:
1349 if roots:
1320 lowestrev = min([self.rev(root) for root in roots])
1350 lowestrev = min([self.rev(root) for root in roots])
1321 else:
1351 else:
1322 # No more roots? Return empty list
1352 # No more roots? Return empty list
1323 return nonodes
1353 return nonodes
1324 else:
1354 else:
1325 # We are descending from nullid, and don't need to care about
1355 # We are descending from nullid, and don't need to care about
1326 # any other roots.
1356 # any other roots.
1327 lowestrev = nullrev
1357 lowestrev = nullrev
1328 roots = [self.nullid]
1358 roots = [self.nullid]
1329 # Transform our roots list into a set.
1359 # Transform our roots list into a set.
1330 descendants = set(roots)
1360 descendants = set(roots)
1331 # Also, keep the original roots so we can filter out roots that aren't
1361 # Also, keep the original roots so we can filter out roots that aren't
1332 # 'real' roots (i.e. are descended from other roots).
1362 # 'real' roots (i.e. are descended from other roots).
1333 roots = descendants.copy()
1363 roots = descendants.copy()
1334 # Our topologically sorted list of output nodes.
1364 # Our topologically sorted list of output nodes.
1335 orderedout = []
1365 orderedout = []
1336 # Don't start at nullid since we don't want nullid in our output list,
1366 # Don't start at nullid since we don't want nullid in our output list,
1337 # and if nullid shows up in descendants, empty parents will look like
1367 # and if nullid shows up in descendants, empty parents will look like
1338 # they're descendants.
1368 # they're descendants.
1339 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1369 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1340 n = self.node(r)
1370 n = self.node(r)
1341 isdescendant = False
1371 isdescendant = False
1342 if lowestrev == nullrev: # Everybody is a descendant of nullid
1372 if lowestrev == nullrev: # Everybody is a descendant of nullid
1343 isdescendant = True
1373 isdescendant = True
1344 elif n in descendants:
1374 elif n in descendants:
1345 # n is already a descendant
1375 # n is already a descendant
1346 isdescendant = True
1376 isdescendant = True
1347 # This check only needs to be done here because all the roots
1377 # This check only needs to be done here because all the roots
1348 # will start being marked is descendants before the loop.
1378 # will start being marked is descendants before the loop.
1349 if n in roots:
1379 if n in roots:
1350 # If n was a root, check if it's a 'real' root.
1380 # If n was a root, check if it's a 'real' root.
1351 p = tuple(self.parents(n))
1381 p = tuple(self.parents(n))
1352 # If any of its parents are descendants, it's not a root.
1382 # If any of its parents are descendants, it's not a root.
1353 if (p[0] in descendants) or (p[1] in descendants):
1383 if (p[0] in descendants) or (p[1] in descendants):
1354 roots.remove(n)
1384 roots.remove(n)
1355 else:
1385 else:
1356 p = tuple(self.parents(n))
1386 p = tuple(self.parents(n))
1357 # A node is a descendant if either of its parents are
1387 # A node is a descendant if either of its parents are
1358 # descendants. (We seeded the dependents list with the roots
1388 # descendants. (We seeded the dependents list with the roots
1359 # up there, remember?)
1389 # up there, remember?)
1360 if (p[0] in descendants) or (p[1] in descendants):
1390 if (p[0] in descendants) or (p[1] in descendants):
1361 descendants.add(n)
1391 descendants.add(n)
1362 isdescendant = True
1392 isdescendant = True
1363 if isdescendant and ((ancestors is None) or (n in ancestors)):
1393 if isdescendant and ((ancestors is None) or (n in ancestors)):
1364 # Only include nodes that are both descendants and ancestors.
1394 # Only include nodes that are both descendants and ancestors.
1365 orderedout.append(n)
1395 orderedout.append(n)
1366 if (ancestors is not None) and (n in heads):
1396 if (ancestors is not None) and (n in heads):
1367 # We're trying to figure out which heads are reachable
1397 # We're trying to figure out which heads are reachable
1368 # from roots.
1398 # from roots.
1369 # Mark this head as having been reached
1399 # Mark this head as having been reached
1370 heads[n] = True
1400 heads[n] = True
1371 elif ancestors is None:
1401 elif ancestors is None:
1372 # Otherwise, we're trying to discover the heads.
1402 # Otherwise, we're trying to discover the heads.
1373 # Assume this is a head because if it isn't, the next step
1403 # Assume this is a head because if it isn't, the next step
1374 # will eventually remove it.
1404 # will eventually remove it.
1375 heads[n] = True
1405 heads[n] = True
1376 # But, obviously its parents aren't.
1406 # But, obviously its parents aren't.
1377 for p in self.parents(n):
1407 for p in self.parents(n):
1378 heads.pop(p, None)
1408 heads.pop(p, None)
1379 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1409 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1380 roots = list(roots)
1410 roots = list(roots)
1381 assert orderedout
1411 assert orderedout
1382 assert roots
1412 assert roots
1383 assert heads
1413 assert heads
1384 return (orderedout, roots, heads)
1414 return (orderedout, roots, heads)
1385
1415
1386 def headrevs(self, revs=None):
1416 def headrevs(self, revs=None):
1387 if revs is None:
1417 if revs is None:
1388 try:
1418 try:
1389 return self.index.headrevs()
1419 return self.index.headrevs()
1390 except AttributeError:
1420 except AttributeError:
1391 return self._headrevs()
1421 return self._headrevs()
1392 if rustdagop is not None and self.index.rust_ext_compat:
1422 if rustdagop is not None and self.index.rust_ext_compat:
1393 return rustdagop.headrevs(self.index, revs)
1423 return rustdagop.headrevs(self.index, revs)
1394 return dagop.headrevs(revs, self._uncheckedparentrevs)
1424 return dagop.headrevs(revs, self._uncheckedparentrevs)
1395
1425
1396 def computephases(self, roots):
1426 def computephases(self, roots):
1397 return self.index.computephasesmapsets(roots)
1427 return self.index.computephasesmapsets(roots)
1398
1428
1399 def _headrevs(self):
1429 def _headrevs(self):
1400 count = len(self)
1430 count = len(self)
1401 if not count:
1431 if not count:
1402 return [nullrev]
1432 return [nullrev]
1403 # we won't iter over filtered rev so nobody is a head at start
1433 # we won't iter over filtered rev so nobody is a head at start
1404 ishead = [0] * (count + 1)
1434 ishead = [0] * (count + 1)
1405 index = self.index
1435 index = self.index
1406 for r in self:
1436 for r in self:
1407 ishead[r] = 1 # I may be an head
1437 ishead[r] = 1 # I may be an head
1408 e = index[r]
1438 e = index[r]
1409 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1439 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1410 return [r for r, val in enumerate(ishead) if val]
1440 return [r for r, val in enumerate(ishead) if val]
1411
1441
1412 def heads(self, start=None, stop=None):
1442 def heads(self, start=None, stop=None):
1413 """return the list of all nodes that have no children
1443 """return the list of all nodes that have no children
1414
1444
1415 if start is specified, only heads that are descendants of
1445 if start is specified, only heads that are descendants of
1416 start will be returned
1446 start will be returned
1417 if stop is specified, it will consider all the revs from stop
1447 if stop is specified, it will consider all the revs from stop
1418 as if they had no children
1448 as if they had no children
1419 """
1449 """
1420 if start is None and stop is None:
1450 if start is None and stop is None:
1421 if not len(self):
1451 if not len(self):
1422 return [self.nullid]
1452 return [self.nullid]
1423 return [self.node(r) for r in self.headrevs()]
1453 return [self.node(r) for r in self.headrevs()]
1424
1454
1425 if start is None:
1455 if start is None:
1426 start = nullrev
1456 start = nullrev
1427 else:
1457 else:
1428 start = self.rev(start)
1458 start = self.rev(start)
1429
1459
1430 stoprevs = {self.rev(n) for n in stop or []}
1460 stoprevs = {self.rev(n) for n in stop or []}
1431
1461
1432 revs = dagop.headrevssubset(
1462 revs = dagop.headrevssubset(
1433 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1463 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1434 )
1464 )
1435
1465
1436 return [self.node(rev) for rev in revs]
1466 return [self.node(rev) for rev in revs]
1437
1467
1438 def children(self, node):
1468 def children(self, node):
1439 """find the children of a given node"""
1469 """find the children of a given node"""
1440 c = []
1470 c = []
1441 p = self.rev(node)
1471 p = self.rev(node)
1442 for r in self.revs(start=p + 1):
1472 for r in self.revs(start=p + 1):
1443 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1473 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1444 if prevs:
1474 if prevs:
1445 for pr in prevs:
1475 for pr in prevs:
1446 if pr == p:
1476 if pr == p:
1447 c.append(self.node(r))
1477 c.append(self.node(r))
1448 elif p == nullrev:
1478 elif p == nullrev:
1449 c.append(self.node(r))
1479 c.append(self.node(r))
1450 return c
1480 return c
1451
1481
1452 def commonancestorsheads(self, a, b):
1482 def commonancestorsheads(self, a, b):
1453 """calculate all the heads of the common ancestors of nodes a and b"""
1483 """calculate all the heads of the common ancestors of nodes a and b"""
1454 a, b = self.rev(a), self.rev(b)
1484 a, b = self.rev(a), self.rev(b)
1455 ancs = self._commonancestorsheads(a, b)
1485 ancs = self._commonancestorsheads(a, b)
1456 return pycompat.maplist(self.node, ancs)
1486 return pycompat.maplist(self.node, ancs)
1457
1487
1458 def _commonancestorsheads(self, *revs):
1488 def _commonancestorsheads(self, *revs):
1459 """calculate all the heads of the common ancestors of revs"""
1489 """calculate all the heads of the common ancestors of revs"""
1460 try:
1490 try:
1461 ancs = self.index.commonancestorsheads(*revs)
1491 ancs = self.index.commonancestorsheads(*revs)
1462 except (AttributeError, OverflowError): # C implementation failed
1492 except (AttributeError, OverflowError): # C implementation failed
1463 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1493 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1464 return ancs
1494 return ancs
1465
1495
1466 def isancestor(self, a, b):
1496 def isancestor(self, a, b):
1467 """return True if node a is an ancestor of node b
1497 """return True if node a is an ancestor of node b
1468
1498
1469 A revision is considered an ancestor of itself."""
1499 A revision is considered an ancestor of itself."""
1470 a, b = self.rev(a), self.rev(b)
1500 a, b = self.rev(a), self.rev(b)
1471 return self.isancestorrev(a, b)
1501 return self.isancestorrev(a, b)
1472
1502
1473 def isancestorrev(self, a, b):
1503 def isancestorrev(self, a, b):
1474 """return True if revision a is an ancestor of revision b
1504 """return True if revision a is an ancestor of revision b
1475
1505
1476 A revision is considered an ancestor of itself.
1506 A revision is considered an ancestor of itself.
1477
1507
1478 The implementation of this is trivial but the use of
1508 The implementation of this is trivial but the use of
1479 reachableroots is not."""
1509 reachableroots is not."""
1480 if a == nullrev:
1510 if a == nullrev:
1481 return True
1511 return True
1482 elif a == b:
1512 elif a == b:
1483 return True
1513 return True
1484 elif a > b:
1514 elif a > b:
1485 return False
1515 return False
1486 return bool(self.reachableroots(a, [b], [a], includepath=False))
1516 return bool(self.reachableroots(a, [b], [a], includepath=False))
1487
1517
1488 def reachableroots(self, minroot, heads, roots, includepath=False):
1518 def reachableroots(self, minroot, heads, roots, includepath=False):
1489 """return (heads(::(<roots> and <roots>::<heads>)))
1519 """return (heads(::(<roots> and <roots>::<heads>)))
1490
1520
1491 If includepath is True, return (<roots>::<heads>)."""
1521 If includepath is True, return (<roots>::<heads>)."""
1492 try:
1522 try:
1493 return self.index.reachableroots2(
1523 return self.index.reachableroots2(
1494 minroot, heads, roots, includepath
1524 minroot, heads, roots, includepath
1495 )
1525 )
1496 except AttributeError:
1526 except AttributeError:
1497 return dagop._reachablerootspure(
1527 return dagop._reachablerootspure(
1498 self.parentrevs, minroot, roots, heads, includepath
1528 self.parentrevs, minroot, roots, heads, includepath
1499 )
1529 )
1500
1530
1501 def ancestor(self, a, b):
1531 def ancestor(self, a, b):
1502 """calculate the "best" common ancestor of nodes a and b"""
1532 """calculate the "best" common ancestor of nodes a and b"""
1503
1533
1504 a, b = self.rev(a), self.rev(b)
1534 a, b = self.rev(a), self.rev(b)
1505 try:
1535 try:
1506 ancs = self.index.ancestors(a, b)
1536 ancs = self.index.ancestors(a, b)
1507 except (AttributeError, OverflowError):
1537 except (AttributeError, OverflowError):
1508 ancs = ancestor.ancestors(self.parentrevs, a, b)
1538 ancs = ancestor.ancestors(self.parentrevs, a, b)
1509 if ancs:
1539 if ancs:
1510 # choose a consistent winner when there's a tie
1540 # choose a consistent winner when there's a tie
1511 return min(map(self.node, ancs))
1541 return min(map(self.node, ancs))
1512 return self.nullid
1542 return self.nullid
1513
1543
1514 def _match(self, id):
1544 def _match(self, id):
1515 if isinstance(id, int):
1545 if isinstance(id, int):
1516 # rev
1546 # rev
1517 return self.node(id)
1547 return self.node(id)
1518 if len(id) == self.nodeconstants.nodelen:
1548 if len(id) == self.nodeconstants.nodelen:
1519 # possibly a binary node
1549 # possibly a binary node
1520 # odds of a binary node being all hex in ASCII are 1 in 10**25
1550 # odds of a binary node being all hex in ASCII are 1 in 10**25
1521 try:
1551 try:
1522 node = id
1552 node = id
1523 self.rev(node) # quick search the index
1553 self.rev(node) # quick search the index
1524 return node
1554 return node
1525 except error.LookupError:
1555 except error.LookupError:
1526 pass # may be partial hex id
1556 pass # may be partial hex id
1527 try:
1557 try:
1528 # str(rev)
1558 # str(rev)
1529 rev = int(id)
1559 rev = int(id)
1530 if b"%d" % rev != id:
1560 if b"%d" % rev != id:
1531 raise ValueError
1561 raise ValueError
1532 if rev < 0:
1562 if rev < 0:
1533 rev = len(self) + rev
1563 rev = len(self) + rev
1534 if rev < 0 or rev >= len(self):
1564 if rev < 0 or rev >= len(self):
1535 raise ValueError
1565 raise ValueError
1536 return self.node(rev)
1566 return self.node(rev)
1537 except (ValueError, OverflowError):
1567 except (ValueError, OverflowError):
1538 pass
1568 pass
1539 if len(id) == 2 * self.nodeconstants.nodelen:
1569 if len(id) == 2 * self.nodeconstants.nodelen:
1540 try:
1570 try:
1541 # a full hex nodeid?
1571 # a full hex nodeid?
1542 node = bin(id)
1572 node = bin(id)
1543 self.rev(node)
1573 self.rev(node)
1544 return node
1574 return node
1545 except (TypeError, error.LookupError):
1575 except (TypeError, error.LookupError):
1546 pass
1576 pass
1547
1577
1548 def _partialmatch(self, id):
1578 def _partialmatch(self, id):
1549 # we don't care wdirfilenodeids as they should be always full hash
1579 # we don't care wdirfilenodeids as they should be always full hash
1550 maybewdir = self.nodeconstants.wdirhex.startswith(id)
1580 maybewdir = self.nodeconstants.wdirhex.startswith(id)
1551 ambiguous = False
1581 ambiguous = False
1552 try:
1582 try:
1553 partial = self.index.partialmatch(id)
1583 partial = self.index.partialmatch(id)
1554 if partial and self.hasnode(partial):
1584 if partial and self.hasnode(partial):
1555 if maybewdir:
1585 if maybewdir:
1556 # single 'ff...' match in radix tree, ambiguous with wdir
1586 # single 'ff...' match in radix tree, ambiguous with wdir
1557 ambiguous = True
1587 ambiguous = True
1558 else:
1588 else:
1559 return partial
1589 return partial
1560 elif maybewdir:
1590 elif maybewdir:
1561 # no 'ff...' match in radix tree, wdir identified
1591 # no 'ff...' match in radix tree, wdir identified
1562 raise error.WdirUnsupported
1592 raise error.WdirUnsupported
1563 else:
1593 else:
1564 return None
1594 return None
1565 except error.RevlogError:
1595 except error.RevlogError:
1566 # parsers.c radix tree lookup gave multiple matches
1596 # parsers.c radix tree lookup gave multiple matches
1567 # fast path: for unfiltered changelog, radix tree is accurate
1597 # fast path: for unfiltered changelog, radix tree is accurate
1568 if not getattr(self, 'filteredrevs', None):
1598 if not getattr(self, 'filteredrevs', None):
1569 ambiguous = True
1599 ambiguous = True
1570 # fall through to slow path that filters hidden revisions
1600 # fall through to slow path that filters hidden revisions
1571 except (AttributeError, ValueError):
1601 except (AttributeError, ValueError):
1572 # we are pure python, or key was too short to search radix tree
1602 # we are pure python, or key was too short to search radix tree
1573 pass
1603 pass
1574 if ambiguous:
1604 if ambiguous:
1575 raise error.AmbiguousPrefixLookupError(
1605 raise error.AmbiguousPrefixLookupError(
1576 id, self.display_id, _(b'ambiguous identifier')
1606 id, self.display_id, _(b'ambiguous identifier')
1577 )
1607 )
1578
1608
1579 if id in self._pcache:
1609 if id in self._pcache:
1580 return self._pcache[id]
1610 return self._pcache[id]
1581
1611
1582 if len(id) <= 40:
1612 if len(id) <= 40:
1583 try:
1613 try:
1584 # hex(node)[:...]
1614 # hex(node)[:...]
1585 l = len(id) // 2 # grab an even number of digits
1615 l = len(id) // 2 # grab an even number of digits
1586 prefix = bin(id[: l * 2])
1616 prefix = bin(id[: l * 2])
1587 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1617 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1588 nl = [
1618 nl = [
1589 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1619 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1590 ]
1620 ]
1591 if self.nodeconstants.nullhex.startswith(id):
1621 if self.nodeconstants.nullhex.startswith(id):
1592 nl.append(self.nullid)
1622 nl.append(self.nullid)
1593 if len(nl) > 0:
1623 if len(nl) > 0:
1594 if len(nl) == 1 and not maybewdir:
1624 if len(nl) == 1 and not maybewdir:
1595 self._pcache[id] = nl[0]
1625 self._pcache[id] = nl[0]
1596 return nl[0]
1626 return nl[0]
1597 raise error.AmbiguousPrefixLookupError(
1627 raise error.AmbiguousPrefixLookupError(
1598 id, self.display_id, _(b'ambiguous identifier')
1628 id, self.display_id, _(b'ambiguous identifier')
1599 )
1629 )
1600 if maybewdir:
1630 if maybewdir:
1601 raise error.WdirUnsupported
1631 raise error.WdirUnsupported
1602 return None
1632 return None
1603 except TypeError:
1633 except TypeError:
1604 pass
1634 pass
1605
1635
1606 def lookup(self, id):
1636 def lookup(self, id):
1607 """locate a node based on:
1637 """locate a node based on:
1608 - revision number or str(revision number)
1638 - revision number or str(revision number)
1609 - nodeid or subset of hex nodeid
1639 - nodeid or subset of hex nodeid
1610 """
1640 """
1611 n = self._match(id)
1641 n = self._match(id)
1612 if n is not None:
1642 if n is not None:
1613 return n
1643 return n
1614 n = self._partialmatch(id)
1644 n = self._partialmatch(id)
1615 if n:
1645 if n:
1616 return n
1646 return n
1617
1647
1618 raise error.LookupError(id, self.display_id, _(b'no match found'))
1648 raise error.LookupError(id, self.display_id, _(b'no match found'))
1619
1649
1620 def shortest(self, node, minlength=1):
1650 def shortest(self, node, minlength=1):
1621 """Find the shortest unambiguous prefix that matches node."""
1651 """Find the shortest unambiguous prefix that matches node."""
1622
1652
1623 def isvalid(prefix):
1653 def isvalid(prefix):
1624 try:
1654 try:
1625 matchednode = self._partialmatch(prefix)
1655 matchednode = self._partialmatch(prefix)
1626 except error.AmbiguousPrefixLookupError:
1656 except error.AmbiguousPrefixLookupError:
1627 return False
1657 return False
1628 except error.WdirUnsupported:
1658 except error.WdirUnsupported:
1629 # single 'ff...' match
1659 # single 'ff...' match
1630 return True
1660 return True
1631 if matchednode is None:
1661 if matchednode is None:
1632 raise error.LookupError(node, self.display_id, _(b'no node'))
1662 raise error.LookupError(node, self.display_id, _(b'no node'))
1633 return True
1663 return True
1634
1664
1635 def maybewdir(prefix):
1665 def maybewdir(prefix):
1636 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1666 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1637
1667
1638 hexnode = hex(node)
1668 hexnode = hex(node)
1639
1669
1640 def disambiguate(hexnode, minlength):
1670 def disambiguate(hexnode, minlength):
1641 """Disambiguate against wdirid."""
1671 """Disambiguate against wdirid."""
1642 for length in range(minlength, len(hexnode) + 1):
1672 for length in range(minlength, len(hexnode) + 1):
1643 prefix = hexnode[:length]
1673 prefix = hexnode[:length]
1644 if not maybewdir(prefix):
1674 if not maybewdir(prefix):
1645 return prefix
1675 return prefix
1646
1676
1647 if not getattr(self, 'filteredrevs', None):
1677 if not getattr(self, 'filteredrevs', None):
1648 try:
1678 try:
1649 length = max(self.index.shortest(node), minlength)
1679 length = max(self.index.shortest(node), minlength)
1650 return disambiguate(hexnode, length)
1680 return disambiguate(hexnode, length)
1651 except error.RevlogError:
1681 except error.RevlogError:
1652 if node != self.nodeconstants.wdirid:
1682 if node != self.nodeconstants.wdirid:
1653 raise error.LookupError(
1683 raise error.LookupError(
1654 node, self.display_id, _(b'no node')
1684 node, self.display_id, _(b'no node')
1655 )
1685 )
1656 except AttributeError:
1686 except AttributeError:
1657 # Fall through to pure code
1687 # Fall through to pure code
1658 pass
1688 pass
1659
1689
1660 if node == self.nodeconstants.wdirid:
1690 if node == self.nodeconstants.wdirid:
1661 for length in range(minlength, len(hexnode) + 1):
1691 for length in range(minlength, len(hexnode) + 1):
1662 prefix = hexnode[:length]
1692 prefix = hexnode[:length]
1663 if isvalid(prefix):
1693 if isvalid(prefix):
1664 return prefix
1694 return prefix
1665
1695
1666 for length in range(minlength, len(hexnode) + 1):
1696 for length in range(minlength, len(hexnode) + 1):
1667 prefix = hexnode[:length]
1697 prefix = hexnode[:length]
1668 if isvalid(prefix):
1698 if isvalid(prefix):
1669 return disambiguate(hexnode, length)
1699 return disambiguate(hexnode, length)
1670
1700
1671 def cmp(self, node, text):
1701 def cmp(self, node, text):
1672 """compare text with a given file revision
1702 """compare text with a given file revision
1673
1703
1674 returns True if text is different than what is stored.
1704 returns True if text is different than what is stored.
1675 """
1705 """
1676 p1, p2 = self.parents(node)
1706 p1, p2 = self.parents(node)
1677 return storageutil.hashrevisionsha1(text, p1, p2) != node
1707 return storageutil.hashrevisionsha1(text, p1, p2) != node
1678
1708
1679 def _cachesegment(self, offset, data):
1709 def _cachesegment(self, offset, data):
1680 """Add a segment to the revlog cache.
1710 """Add a segment to the revlog cache.
1681
1711
1682 Accepts an absolute offset and the data that is at that location.
1712 Accepts an absolute offset and the data that is at that location.
1683 """
1713 """
1684 o, d = self._chunkcache
1714 o, d = self._chunkcache
1685 # try to add to existing cache
1715 # try to add to existing cache
1686 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1716 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1687 self._chunkcache = o, d + data
1717 self._chunkcache = o, d + data
1688 else:
1718 else:
1689 self._chunkcache = offset, data
1719 self._chunkcache = offset, data
1690
1720
1691 def _readsegment(self, offset, length, df=None):
1721 def _readsegment(self, offset, length, df=None):
1692 """Load a segment of raw data from the revlog.
1722 """Load a segment of raw data from the revlog.
1693
1723
1694 Accepts an absolute offset, length to read, and an optional existing
1724 Accepts an absolute offset, length to read, and an optional existing
1695 file handle to read from.
1725 file handle to read from.
1696
1726
1697 If an existing file handle is passed, it will be seeked and the
1727 If an existing file handle is passed, it will be seeked and the
1698 original seek position will NOT be restored.
1728 original seek position will NOT be restored.
1699
1729
1700 Returns a str or buffer of raw byte data.
1730 Returns a str or buffer of raw byte data.
1701
1731
1702 Raises if the requested number of bytes could not be read.
1732 Raises if the requested number of bytes could not be read.
1703 """
1733 """
1704 # Cache data both forward and backward around the requested
1734 # Cache data both forward and backward around the requested
1705 # data, in a fixed size window. This helps speed up operations
1735 # data, in a fixed size window. This helps speed up operations
1706 # involving reading the revlog backwards.
1736 # involving reading the revlog backwards.
1707 cachesize = self._chunkcachesize
1737 cachesize = self._chunkcachesize
1708 realoffset = offset & ~(cachesize - 1)
1738 realoffset = offset & ~(cachesize - 1)
1709 reallength = (
1739 reallength = (
1710 (offset + length + cachesize) & ~(cachesize - 1)
1740 (offset + length + cachesize) & ~(cachesize - 1)
1711 ) - realoffset
1741 ) - realoffset
1712 with self._datareadfp(df) as df:
1742 with self._datareadfp(df) as df:
1713 df.seek(realoffset)
1743 df.seek(realoffset)
1714 d = df.read(reallength)
1744 d = df.read(reallength)
1715
1745
1716 self._cachesegment(realoffset, d)
1746 self._cachesegment(realoffset, d)
1717 if offset != realoffset or reallength != length:
1747 if offset != realoffset or reallength != length:
1718 startoffset = offset - realoffset
1748 startoffset = offset - realoffset
1719 if len(d) - startoffset < length:
1749 if len(d) - startoffset < length:
1720 filename = self._indexfile if self._inline else self._datafile
1750 filename = self._indexfile if self._inline else self._datafile
1721 got = len(d) - startoffset
1751 got = len(d) - startoffset
1722 m = PARTIAL_READ_MSG % (filename, length, offset, got)
1752 m = PARTIAL_READ_MSG % (filename, length, offset, got)
1723 raise error.RevlogError(m)
1753 raise error.RevlogError(m)
1724 return util.buffer(d, startoffset, length)
1754 return util.buffer(d, startoffset, length)
1725
1755
1726 if len(d) < length:
1756 if len(d) < length:
1727 filename = self._indexfile if self._inline else self._datafile
1757 filename = self._indexfile if self._inline else self._datafile
1728 got = len(d) - startoffset
1758 got = len(d) - startoffset
1729 m = PARTIAL_READ_MSG % (filename, length, offset, got)
1759 m = PARTIAL_READ_MSG % (filename, length, offset, got)
1730 raise error.RevlogError(m)
1760 raise error.RevlogError(m)
1731
1761
1732 return d
1762 return d
1733
1763
1734 def _getsegment(self, offset, length, df=None):
1764 def _getsegment(self, offset, length, df=None):
1735 """Obtain a segment of raw data from the revlog.
1765 """Obtain a segment of raw data from the revlog.
1736
1766
1737 Accepts an absolute offset, length of bytes to obtain, and an
1767 Accepts an absolute offset, length of bytes to obtain, and an
1738 optional file handle to the already-opened revlog. If the file
1768 optional file handle to the already-opened revlog. If the file
1739 handle is used, it's original seek position will not be preserved.
1769 handle is used, it's original seek position will not be preserved.
1740
1770
1741 Requests for data may be returned from a cache.
1771 Requests for data may be returned from a cache.
1742
1772
1743 Returns a str or a buffer instance of raw byte data.
1773 Returns a str or a buffer instance of raw byte data.
1744 """
1774 """
1745 o, d = self._chunkcache
1775 o, d = self._chunkcache
1746 l = len(d)
1776 l = len(d)
1747
1777
1748 # is it in the cache?
1778 # is it in the cache?
1749 cachestart = offset - o
1779 cachestart = offset - o
1750 cacheend = cachestart + length
1780 cacheend = cachestart + length
1751 if cachestart >= 0 and cacheend <= l:
1781 if cachestart >= 0 and cacheend <= l:
1752 if cachestart == 0 and cacheend == l:
1782 if cachestart == 0 and cacheend == l:
1753 return d # avoid a copy
1783 return d # avoid a copy
1754 return util.buffer(d, cachestart, cacheend - cachestart)
1784 return util.buffer(d, cachestart, cacheend - cachestart)
1755
1785
1756 return self._readsegment(offset, length, df=df)
1786 return self._readsegment(offset, length, df=df)
1757
1787
1758 def _getsegmentforrevs(self, startrev, endrev, df=None):
1788 def _getsegmentforrevs(self, startrev, endrev, df=None):
1759 """Obtain a segment of raw data corresponding to a range of revisions.
1789 """Obtain a segment of raw data corresponding to a range of revisions.
1760
1790
1761 Accepts the start and end revisions and an optional already-open
1791 Accepts the start and end revisions and an optional already-open
1762 file handle to be used for reading. If the file handle is read, its
1792 file handle to be used for reading. If the file handle is read, its
1763 seek position will not be preserved.
1793 seek position will not be preserved.
1764
1794
1765 Requests for data may be satisfied by a cache.
1795 Requests for data may be satisfied by a cache.
1766
1796
1767 Returns a 2-tuple of (offset, data) for the requested range of
1797 Returns a 2-tuple of (offset, data) for the requested range of
1768 revisions. Offset is the integer offset from the beginning of the
1798 revisions. Offset is the integer offset from the beginning of the
1769 revlog and data is a str or buffer of the raw byte data.
1799 revlog and data is a str or buffer of the raw byte data.
1770
1800
1771 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1801 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1772 to determine where each revision's data begins and ends.
1802 to determine where each revision's data begins and ends.
1773 """
1803 """
1774 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1804 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1775 # (functions are expensive).
1805 # (functions are expensive).
1776 index = self.index
1806 index = self.index
1777 istart = index[startrev]
1807 istart = index[startrev]
1778 start = int(istart[0] >> 16)
1808 start = int(istart[0] >> 16)
1779 if startrev == endrev:
1809 if startrev == endrev:
1780 end = start + istart[1]
1810 end = start + istart[1]
1781 else:
1811 else:
1782 iend = index[endrev]
1812 iend = index[endrev]
1783 end = int(iend[0] >> 16) + iend[1]
1813 end = int(iend[0] >> 16) + iend[1]
1784
1814
1785 if self._inline:
1815 if self._inline:
1786 start += (startrev + 1) * self.index.entry_size
1816 start += (startrev + 1) * self.index.entry_size
1787 end += (endrev + 1) * self.index.entry_size
1817 end += (endrev + 1) * self.index.entry_size
1788 length = end - start
1818 length = end - start
1789
1819
1790 return start, self._getsegment(start, length, df=df)
1820 return start, self._getsegment(start, length, df=df)
1791
1821
1792 def _chunk(self, rev, df=None):
1822 def _chunk(self, rev, df=None):
1793 """Obtain a single decompressed chunk for a revision.
1823 """Obtain a single decompressed chunk for a revision.
1794
1824
1795 Accepts an integer revision and an optional already-open file handle
1825 Accepts an integer revision and an optional already-open file handle
1796 to be used for reading. If used, the seek position of the file will not
1826 to be used for reading. If used, the seek position of the file will not
1797 be preserved.
1827 be preserved.
1798
1828
1799 Returns a str holding uncompressed data for the requested revision.
1829 Returns a str holding uncompressed data for the requested revision.
1800 """
1830 """
1801 compression_mode = self.index[rev][10]
1831 compression_mode = self.index[rev][10]
1802 data = self._getsegmentforrevs(rev, rev, df=df)[1]
1832 data = self._getsegmentforrevs(rev, rev, df=df)[1]
1803 if compression_mode == COMP_MODE_PLAIN:
1833 if compression_mode == COMP_MODE_PLAIN:
1804 return data
1834 return data
1805 elif compression_mode == COMP_MODE_DEFAULT:
1835 elif compression_mode == COMP_MODE_DEFAULT:
1806 return self._decompressor(data)
1836 return self._decompressor(data)
1807 elif compression_mode == COMP_MODE_INLINE:
1837 elif compression_mode == COMP_MODE_INLINE:
1808 return self.decompress(data)
1838 return self.decompress(data)
1809 else:
1839 else:
1810 msg = 'unknown compression mode %d'
1840 msg = 'unknown compression mode %d'
1811 msg %= compression_mode
1841 msg %= compression_mode
1812 raise error.RevlogError(msg)
1842 raise error.RevlogError(msg)
1813
1843
1814 def _chunks(self, revs, df=None, targetsize=None):
1844 def _chunks(self, revs, df=None, targetsize=None):
1815 """Obtain decompressed chunks for the specified revisions.
1845 """Obtain decompressed chunks for the specified revisions.
1816
1846
1817 Accepts an iterable of numeric revisions that are assumed to be in
1847 Accepts an iterable of numeric revisions that are assumed to be in
1818 ascending order. Also accepts an optional already-open file handle
1848 ascending order. Also accepts an optional already-open file handle
1819 to be used for reading. If used, the seek position of the file will
1849 to be used for reading. If used, the seek position of the file will
1820 not be preserved.
1850 not be preserved.
1821
1851
1822 This function is similar to calling ``self._chunk()`` multiple times,
1852 This function is similar to calling ``self._chunk()`` multiple times,
1823 but is faster.
1853 but is faster.
1824
1854
1825 Returns a list with decompressed data for each requested revision.
1855 Returns a list with decompressed data for each requested revision.
1826 """
1856 """
1827 if not revs:
1857 if not revs:
1828 return []
1858 return []
1829 start = self.start
1859 start = self.start
1830 length = self.length
1860 length = self.length
1831 inline = self._inline
1861 inline = self._inline
1832 iosize = self.index.entry_size
1862 iosize = self.index.entry_size
1833 buffer = util.buffer
1863 buffer = util.buffer
1834
1864
1835 l = []
1865 l = []
1836 ladd = l.append
1866 ladd = l.append
1837
1867
1838 if not self._withsparseread:
1868 if not self._withsparseread:
1839 slicedchunks = (revs,)
1869 slicedchunks = (revs,)
1840 else:
1870 else:
1841 slicedchunks = deltautil.slicechunk(
1871 slicedchunks = deltautil.slicechunk(
1842 self, revs, targetsize=targetsize
1872 self, revs, targetsize=targetsize
1843 )
1873 )
1844
1874
1845 for revschunk in slicedchunks:
1875 for revschunk in slicedchunks:
1846 firstrev = revschunk[0]
1876 firstrev = revschunk[0]
1847 # Skip trailing revisions with empty diff
1877 # Skip trailing revisions with empty diff
1848 for lastrev in revschunk[::-1]:
1878 for lastrev in revschunk[::-1]:
1849 if length(lastrev) != 0:
1879 if length(lastrev) != 0:
1850 break
1880 break
1851
1881
1852 try:
1882 try:
1853 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1883 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1854 except OverflowError:
1884 except OverflowError:
1855 # issue4215 - we can't cache a run of chunks greater than
1885 # issue4215 - we can't cache a run of chunks greater than
1856 # 2G on Windows
1886 # 2G on Windows
1857 return [self._chunk(rev, df=df) for rev in revschunk]
1887 return [self._chunk(rev, df=df) for rev in revschunk]
1858
1888
1859 decomp = self.decompress
1889 decomp = self.decompress
1860 # self._decompressor might be None, but will not be used in that case
1890 # self._decompressor might be None, but will not be used in that case
1861 def_decomp = self._decompressor
1891 def_decomp = self._decompressor
1862 for rev in revschunk:
1892 for rev in revschunk:
1863 chunkstart = start(rev)
1893 chunkstart = start(rev)
1864 if inline:
1894 if inline:
1865 chunkstart += (rev + 1) * iosize
1895 chunkstart += (rev + 1) * iosize
1866 chunklength = length(rev)
1896 chunklength = length(rev)
1867 comp_mode = self.index[rev][10]
1897 comp_mode = self.index[rev][10]
1868 c = buffer(data, chunkstart - offset, chunklength)
1898 c = buffer(data, chunkstart - offset, chunklength)
1869 if comp_mode == COMP_MODE_PLAIN:
1899 if comp_mode == COMP_MODE_PLAIN:
1870 ladd(c)
1900 ladd(c)
1871 elif comp_mode == COMP_MODE_INLINE:
1901 elif comp_mode == COMP_MODE_INLINE:
1872 ladd(decomp(c))
1902 ladd(decomp(c))
1873 elif comp_mode == COMP_MODE_DEFAULT:
1903 elif comp_mode == COMP_MODE_DEFAULT:
1874 ladd(def_decomp(c))
1904 ladd(def_decomp(c))
1875 else:
1905 else:
1876 msg = 'unknown compression mode %d'
1906 msg = 'unknown compression mode %d'
1877 msg %= comp_mode
1907 msg %= comp_mode
1878 raise error.RevlogError(msg)
1908 raise error.RevlogError(msg)
1879
1909
1880 return l
1910 return l
1881
1911
1882 def _chunkclear(self):
1912 def _chunkclear(self):
1883 """Clear the raw chunk cache."""
1913 """Clear the raw chunk cache."""
1884 self._chunkcache = (0, b'')
1914 self._chunkcache = (0, b'')
1885
1915
1886 def deltaparent(self, rev):
1916 def deltaparent(self, rev):
1887 """return deltaparent of the given revision"""
1917 """return deltaparent of the given revision"""
1888 base = self.index[rev][3]
1918 base = self.index[rev][3]
1889 if base == rev:
1919 if base == rev:
1890 return nullrev
1920 return nullrev
1891 elif self._generaldelta:
1921 elif self._generaldelta:
1892 return base
1922 return base
1893 else:
1923 else:
1894 return rev - 1
1924 return rev - 1
1895
1925
1896 def issnapshot(self, rev):
1926 def issnapshot(self, rev):
1897 """tells whether rev is a snapshot"""
1927 """tells whether rev is a snapshot"""
1898 if not self._sparserevlog:
1928 if not self._sparserevlog:
1899 return self.deltaparent(rev) == nullrev
1929 return self.deltaparent(rev) == nullrev
1900 elif util.safehasattr(self.index, b'issnapshot'):
1930 elif util.safehasattr(self.index, b'issnapshot'):
1901 # directly assign the method to cache the testing and access
1931 # directly assign the method to cache the testing and access
1902 self.issnapshot = self.index.issnapshot
1932 self.issnapshot = self.index.issnapshot
1903 return self.issnapshot(rev)
1933 return self.issnapshot(rev)
1904 if rev == nullrev:
1934 if rev == nullrev:
1905 return True
1935 return True
1906 entry = self.index[rev]
1936 entry = self.index[rev]
1907 base = entry[3]
1937 base = entry[3]
1908 if base == rev:
1938 if base == rev:
1909 return True
1939 return True
1910 if base == nullrev:
1940 if base == nullrev:
1911 return True
1941 return True
1912 p1 = entry[5]
1942 p1 = entry[5]
1913 p2 = entry[6]
1943 p2 = entry[6]
1914 if base == p1 or base == p2:
1944 if base == p1 or base == p2:
1915 return False
1945 return False
1916 return self.issnapshot(base)
1946 return self.issnapshot(base)
1917
1947
1918 def snapshotdepth(self, rev):
1948 def snapshotdepth(self, rev):
1919 """number of snapshot in the chain before this one"""
1949 """number of snapshot in the chain before this one"""
1920 if not self.issnapshot(rev):
1950 if not self.issnapshot(rev):
1921 raise error.ProgrammingError(b'revision %d not a snapshot')
1951 raise error.ProgrammingError(b'revision %d not a snapshot')
1922 return len(self._deltachain(rev)[0]) - 1
1952 return len(self._deltachain(rev)[0]) - 1
1923
1953
1924 def revdiff(self, rev1, rev2):
1954 def revdiff(self, rev1, rev2):
1925 """return or calculate a delta between two revisions
1955 """return or calculate a delta between two revisions
1926
1956
1927 The delta calculated is in binary form and is intended to be written to
1957 The delta calculated is in binary form and is intended to be written to
1928 revlog data directly. So this function needs raw revision data.
1958 revlog data directly. So this function needs raw revision data.
1929 """
1959 """
1930 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1960 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1931 return bytes(self._chunk(rev2))
1961 return bytes(self._chunk(rev2))
1932
1962
1933 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1963 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1934
1964
1935 def _processflags(self, text, flags, operation, raw=False):
1965 def _processflags(self, text, flags, operation, raw=False):
1936 """deprecated entry point to access flag processors"""
1966 """deprecated entry point to access flag processors"""
1937 msg = b'_processflag(...) use the specialized variant'
1967 msg = b'_processflag(...) use the specialized variant'
1938 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1968 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1939 if raw:
1969 if raw:
1940 return text, flagutil.processflagsraw(self, text, flags)
1970 return text, flagutil.processflagsraw(self, text, flags)
1941 elif operation == b'read':
1971 elif operation == b'read':
1942 return flagutil.processflagsread(self, text, flags)
1972 return flagutil.processflagsread(self, text, flags)
1943 else: # write operation
1973 else: # write operation
1944 return flagutil.processflagswrite(self, text, flags)
1974 return flagutil.processflagswrite(self, text, flags)
1945
1975
1946 def revision(self, nodeorrev, _df=None, raw=False):
1976 def revision(self, nodeorrev, _df=None, raw=False):
1947 """return an uncompressed revision of a given node or revision
1977 """return an uncompressed revision of a given node or revision
1948 number.
1978 number.
1949
1979
1950 _df - an existing file handle to read from. (internal-only)
1980 _df - an existing file handle to read from. (internal-only)
1951 raw - an optional argument specifying if the revision data is to be
1981 raw - an optional argument specifying if the revision data is to be
1952 treated as raw data when applying flag transforms. 'raw' should be set
1982 treated as raw data when applying flag transforms. 'raw' should be set
1953 to True when generating changegroups or in debug commands.
1983 to True when generating changegroups or in debug commands.
1954 """
1984 """
1955 if raw:
1985 if raw:
1956 msg = (
1986 msg = (
1957 b'revlog.revision(..., raw=True) is deprecated, '
1987 b'revlog.revision(..., raw=True) is deprecated, '
1958 b'use revlog.rawdata(...)'
1988 b'use revlog.rawdata(...)'
1959 )
1989 )
1960 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1990 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1961 return self._revisiondata(nodeorrev, _df, raw=raw)
1991 return self._revisiondata(nodeorrev, _df, raw=raw)
1962
1992
1963 def sidedata(self, nodeorrev, _df=None):
1993 def sidedata(self, nodeorrev, _df=None):
1964 """a map of extra data related to the changeset but not part of the hash
1994 """a map of extra data related to the changeset but not part of the hash
1965
1995
1966 This function currently return a dictionary. However, more advanced
1996 This function currently return a dictionary. However, more advanced
1967 mapping object will likely be used in the future for a more
1997 mapping object will likely be used in the future for a more
1968 efficient/lazy code.
1998 efficient/lazy code.
1969 """
1999 """
1970 # deal with <nodeorrev> argument type
2000 # deal with <nodeorrev> argument type
1971 if isinstance(nodeorrev, int):
2001 if isinstance(nodeorrev, int):
1972 rev = nodeorrev
2002 rev = nodeorrev
1973 else:
2003 else:
1974 rev = self.rev(nodeorrev)
2004 rev = self.rev(nodeorrev)
1975 return self._sidedata(rev)
2005 return self._sidedata(rev)
1976
2006
1977 def _revisiondata(self, nodeorrev, _df=None, raw=False):
2007 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1978 # deal with <nodeorrev> argument type
2008 # deal with <nodeorrev> argument type
1979 if isinstance(nodeorrev, int):
2009 if isinstance(nodeorrev, int):
1980 rev = nodeorrev
2010 rev = nodeorrev
1981 node = self.node(rev)
2011 node = self.node(rev)
1982 else:
2012 else:
1983 node = nodeorrev
2013 node = nodeorrev
1984 rev = None
2014 rev = None
1985
2015
1986 # fast path the special `nullid` rev
2016 # fast path the special `nullid` rev
1987 if node == self.nullid:
2017 if node == self.nullid:
1988 return b""
2018 return b""
1989
2019
1990 # ``rawtext`` is the text as stored inside the revlog. Might be the
2020 # ``rawtext`` is the text as stored inside the revlog. Might be the
1991 # revision or might need to be processed to retrieve the revision.
2021 # revision or might need to be processed to retrieve the revision.
1992 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
2022 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1993
2023
1994 if raw and validated:
2024 if raw and validated:
1995 # if we don't want to process the raw text and that raw
2025 # if we don't want to process the raw text and that raw
1996 # text is cached, we can exit early.
2026 # text is cached, we can exit early.
1997 return rawtext
2027 return rawtext
1998 if rev is None:
2028 if rev is None:
1999 rev = self.rev(node)
2029 rev = self.rev(node)
2000 # the revlog's flag for this revision
2030 # the revlog's flag for this revision
2001 # (usually alter its state or content)
2031 # (usually alter its state or content)
2002 flags = self.flags(rev)
2032 flags = self.flags(rev)
2003
2033
2004 if validated and flags == REVIDX_DEFAULT_FLAGS:
2034 if validated and flags == REVIDX_DEFAULT_FLAGS:
2005 # no extra flags set, no flag processor runs, text = rawtext
2035 # no extra flags set, no flag processor runs, text = rawtext
2006 return rawtext
2036 return rawtext
2007
2037
2008 if raw:
2038 if raw:
2009 validatehash = flagutil.processflagsraw(self, rawtext, flags)
2039 validatehash = flagutil.processflagsraw(self, rawtext, flags)
2010 text = rawtext
2040 text = rawtext
2011 else:
2041 else:
2012 r = flagutil.processflagsread(self, rawtext, flags)
2042 r = flagutil.processflagsread(self, rawtext, flags)
2013 text, validatehash = r
2043 text, validatehash = r
2014 if validatehash:
2044 if validatehash:
2015 self.checkhash(text, node, rev=rev)
2045 self.checkhash(text, node, rev=rev)
2016 if not validated:
2046 if not validated:
2017 self._revisioncache = (node, rev, rawtext)
2047 self._revisioncache = (node, rev, rawtext)
2018
2048
2019 return text
2049 return text
2020
2050
2021 def _rawtext(self, node, rev, _df=None):
2051 def _rawtext(self, node, rev, _df=None):
2022 """return the possibly unvalidated rawtext for a revision
2052 """return the possibly unvalidated rawtext for a revision
2023
2053
2024 returns (rev, rawtext, validated)
2054 returns (rev, rawtext, validated)
2025 """
2055 """
2026
2056
2027 # revision in the cache (could be useful to apply delta)
2057 # revision in the cache (could be useful to apply delta)
2028 cachedrev = None
2058 cachedrev = None
2029 # An intermediate text to apply deltas to
2059 # An intermediate text to apply deltas to
2030 basetext = None
2060 basetext = None
2031
2061
2032 # Check if we have the entry in cache
2062 # Check if we have the entry in cache
2033 # The cache entry looks like (node, rev, rawtext)
2063 # The cache entry looks like (node, rev, rawtext)
2034 if self._revisioncache:
2064 if self._revisioncache:
2035 if self._revisioncache[0] == node:
2065 if self._revisioncache[0] == node:
2036 return (rev, self._revisioncache[2], True)
2066 return (rev, self._revisioncache[2], True)
2037 cachedrev = self._revisioncache[1]
2067 cachedrev = self._revisioncache[1]
2038
2068
2039 if rev is None:
2069 if rev is None:
2040 rev = self.rev(node)
2070 rev = self.rev(node)
2041
2071
2042 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
2072 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
2043 if stopped:
2073 if stopped:
2044 basetext = self._revisioncache[2]
2074 basetext = self._revisioncache[2]
2045
2075
2046 # drop cache to save memory, the caller is expected to
2076 # drop cache to save memory, the caller is expected to
2047 # update self._revisioncache after validating the text
2077 # update self._revisioncache after validating the text
2048 self._revisioncache = None
2078 self._revisioncache = None
2049
2079
2050 targetsize = None
2080 targetsize = None
2051 rawsize = self.index[rev][2]
2081 rawsize = self.index[rev][2]
2052 if 0 <= rawsize:
2082 if 0 <= rawsize:
2053 targetsize = 4 * rawsize
2083 targetsize = 4 * rawsize
2054
2084
2055 bins = self._chunks(chain, df=_df, targetsize=targetsize)
2085 bins = self._chunks(chain, df=_df, targetsize=targetsize)
2056 if basetext is None:
2086 if basetext is None:
2057 basetext = bytes(bins[0])
2087 basetext = bytes(bins[0])
2058 bins = bins[1:]
2088 bins = bins[1:]
2059
2089
2060 rawtext = mdiff.patches(basetext, bins)
2090 rawtext = mdiff.patches(basetext, bins)
2061 del basetext # let us have a chance to free memory early
2091 del basetext # let us have a chance to free memory early
2062 return (rev, rawtext, False)
2092 return (rev, rawtext, False)
2063
2093
2064 def _sidedata(self, rev):
2094 def _sidedata(self, rev):
2065 """Return the sidedata for a given revision number."""
2095 """Return the sidedata for a given revision number."""
2066 index_entry = self.index[rev]
2096 index_entry = self.index[rev]
2067 sidedata_offset = index_entry[8]
2097 sidedata_offset = index_entry[8]
2068 sidedata_size = index_entry[9]
2098 sidedata_size = index_entry[9]
2069
2099
2070 if self._inline:
2100 if self._inline:
2071 sidedata_offset += self.index.entry_size * (1 + rev)
2101 sidedata_offset += self.index.entry_size * (1 + rev)
2072 if sidedata_size == 0:
2102 if sidedata_size == 0:
2073 return {}
2103 return {}
2074
2104
2075 # XXX this need caching, as we do for data
2105 # XXX this need caching, as we do for data
2076 with self._sidedatareadfp() as sdf:
2106 with self._sidedatareadfp() as sdf:
2077 sdf.seek(sidedata_offset)
2107 if self._docket.sidedata_end < sidedata_offset + sidedata_size:
2108 filename = self._sidedatafile
2109 end = self._docket.sidedata_end
2110 offset = sidedata_offset
2111 length = sidedata_size
2112 m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
2113 raise error.RevlogError(m)
2114
2115 sdf.seek(sidedata_offset, os.SEEK_SET)
2078 comp_segment = sdf.read(sidedata_size)
2116 comp_segment = sdf.read(sidedata_size)
2079
2117
2080 if len(comp_segment) < sidedata_size:
2118 if len(comp_segment) < sidedata_size:
2081 filename = self._datafile
2119 filename = self._sidedatafile
2082 length = sidedata_size
2120 length = sidedata_size
2083 offset = sidedata_offset
2121 offset = sidedata_offset
2084 got = len(comp_segment)
2122 got = len(comp_segment)
2085 m = PARTIAL_READ_MSG % (filename, length, offset, got)
2123 m = PARTIAL_READ_MSG % (filename, length, offset, got)
2086 raise error.RevlogError(m)
2124 raise error.RevlogError(m)
2087
2125
2088 comp = self.index[rev][11]
2126 comp = self.index[rev][11]
2089 if comp == COMP_MODE_PLAIN:
2127 if comp == COMP_MODE_PLAIN:
2090 segment = comp_segment
2128 segment = comp_segment
2091 elif comp == COMP_MODE_DEFAULT:
2129 elif comp == COMP_MODE_DEFAULT:
2092 segment = self._decompressor(comp_segment)
2130 segment = self._decompressor(comp_segment)
2093 elif comp == COMP_MODE_INLINE:
2131 elif comp == COMP_MODE_INLINE:
2094 segment = self.decompress(comp_segment)
2132 segment = self.decompress(comp_segment)
2095 else:
2133 else:
2096 msg = 'unknown compression mode %d'
2134 msg = 'unknown compression mode %d'
2097 msg %= comp
2135 msg %= comp
2098 raise error.RevlogError(msg)
2136 raise error.RevlogError(msg)
2099
2137
2100 sidedata = sidedatautil.deserialize_sidedata(segment)
2138 sidedata = sidedatautil.deserialize_sidedata(segment)
2101 return sidedata
2139 return sidedata
2102
2140
2103 def rawdata(self, nodeorrev, _df=None):
2141 def rawdata(self, nodeorrev, _df=None):
2104 """return an uncompressed raw data of a given node or revision number.
2142 """return an uncompressed raw data of a given node or revision number.
2105
2143
2106 _df - an existing file handle to read from. (internal-only)
2144 _df - an existing file handle to read from. (internal-only)
2107 """
2145 """
2108 return self._revisiondata(nodeorrev, _df, raw=True)
2146 return self._revisiondata(nodeorrev, _df, raw=True)
2109
2147
2110 def hash(self, text, p1, p2):
2148 def hash(self, text, p1, p2):
2111 """Compute a node hash.
2149 """Compute a node hash.
2112
2150
2113 Available as a function so that subclasses can replace the hash
2151 Available as a function so that subclasses can replace the hash
2114 as needed.
2152 as needed.
2115 """
2153 """
2116 return storageutil.hashrevisionsha1(text, p1, p2)
2154 return storageutil.hashrevisionsha1(text, p1, p2)
2117
2155
2118 def checkhash(self, text, node, p1=None, p2=None, rev=None):
2156 def checkhash(self, text, node, p1=None, p2=None, rev=None):
2119 """Check node hash integrity.
2157 """Check node hash integrity.
2120
2158
2121 Available as a function so that subclasses can extend hash mismatch
2159 Available as a function so that subclasses can extend hash mismatch
2122 behaviors as needed.
2160 behaviors as needed.
2123 """
2161 """
2124 try:
2162 try:
2125 if p1 is None and p2 is None:
2163 if p1 is None and p2 is None:
2126 p1, p2 = self.parents(node)
2164 p1, p2 = self.parents(node)
2127 if node != self.hash(text, p1, p2):
2165 if node != self.hash(text, p1, p2):
2128 # Clear the revision cache on hash failure. The revision cache
2166 # Clear the revision cache on hash failure. The revision cache
2129 # only stores the raw revision and clearing the cache does have
2167 # only stores the raw revision and clearing the cache does have
2130 # the side-effect that we won't have a cache hit when the raw
2168 # the side-effect that we won't have a cache hit when the raw
2131 # revision data is accessed. But this case should be rare and
2169 # revision data is accessed. But this case should be rare and
2132 # it is extra work to teach the cache about the hash
2170 # it is extra work to teach the cache about the hash
2133 # verification state.
2171 # verification state.
2134 if self._revisioncache and self._revisioncache[0] == node:
2172 if self._revisioncache and self._revisioncache[0] == node:
2135 self._revisioncache = None
2173 self._revisioncache = None
2136
2174
2137 revornode = rev
2175 revornode = rev
2138 if revornode is None:
2176 if revornode is None:
2139 revornode = templatefilters.short(hex(node))
2177 revornode = templatefilters.short(hex(node))
2140 raise error.RevlogError(
2178 raise error.RevlogError(
2141 _(b"integrity check failed on %s:%s")
2179 _(b"integrity check failed on %s:%s")
2142 % (self.display_id, pycompat.bytestr(revornode))
2180 % (self.display_id, pycompat.bytestr(revornode))
2143 )
2181 )
2144 except error.RevlogError:
2182 except error.RevlogError:
2145 if self._censorable and storageutil.iscensoredtext(text):
2183 if self._censorable and storageutil.iscensoredtext(text):
2146 raise error.CensoredNodeError(self.display_id, node, text)
2184 raise error.CensoredNodeError(self.display_id, node, text)
2147 raise
2185 raise
2148
2186
2149 def _enforceinlinesize(self, tr):
2187 def _enforceinlinesize(self, tr):
2150 """Check if the revlog is too big for inline and convert if so.
2188 """Check if the revlog is too big for inline and convert if so.
2151
2189
2152 This should be called after revisions are added to the revlog. If the
2190 This should be called after revisions are added to the revlog. If the
2153 revlog has grown too large to be an inline revlog, it will convert it
2191 revlog has grown too large to be an inline revlog, it will convert it
2154 to use multiple index and data files.
2192 to use multiple index and data files.
2155 """
2193 """
2156 tiprev = len(self) - 1
2194 tiprev = len(self) - 1
2157 total_size = self.start(tiprev) + self.length(tiprev)
2195 total_size = self.start(tiprev) + self.length(tiprev)
2158 if not self._inline or total_size < _maxinline:
2196 if not self._inline or total_size < _maxinline:
2159 return
2197 return
2160
2198
2161 troffset = tr.findoffset(self._indexfile)
2199 troffset = tr.findoffset(self._indexfile)
2162 if troffset is None:
2200 if troffset is None:
2163 raise error.RevlogError(
2201 raise error.RevlogError(
2164 _(b"%s not found in the transaction") % self._indexfile
2202 _(b"%s not found in the transaction") % self._indexfile
2165 )
2203 )
2166 trindex = 0
2204 trindex = 0
2167 tr.add(self._datafile, 0)
2205 tr.add(self._datafile, 0)
2168
2206
2169 existing_handles = False
2207 existing_handles = False
2170 if self._writinghandles is not None:
2208 if self._writinghandles is not None:
2171 existing_handles = True
2209 existing_handles = True
2172 fp = self._writinghandles[0]
2210 fp = self._writinghandles[0]
2173 fp.flush()
2211 fp.flush()
2174 fp.close()
2212 fp.close()
2175 # We can't use the cached file handle after close(). So prevent
2213 # We can't use the cached file handle after close(). So prevent
2176 # its usage.
2214 # its usage.
2177 self._writinghandles = None
2215 self._writinghandles = None
2178
2216
2179 new_dfh = self._datafp(b'w+')
2217 new_dfh = self._datafp(b'w+')
2180 new_dfh.truncate(0) # drop any potentially existing data
2218 new_dfh.truncate(0) # drop any potentially existing data
2181 try:
2219 try:
2182 with self._indexfp() as read_ifh:
2220 with self._indexfp() as read_ifh:
2183 for r in self:
2221 for r in self:
2184 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2222 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2185 if troffset <= self.start(r) + r * self.index.entry_size:
2223 if troffset <= self.start(r) + r * self.index.entry_size:
2186 trindex = r
2224 trindex = r
2187 new_dfh.flush()
2225 new_dfh.flush()
2188
2226
2189 with self.__index_new_fp() as fp:
2227 with self.__index_new_fp() as fp:
2190 self._format_flags &= ~FLAG_INLINE_DATA
2228 self._format_flags &= ~FLAG_INLINE_DATA
2191 self._inline = False
2229 self._inline = False
2192 for i in self:
2230 for i in self:
2193 e = self.index.entry_binary(i)
2231 e = self.index.entry_binary(i)
2194 if i == 0 and self._docket is None:
2232 if i == 0 and self._docket is None:
2195 header = self._format_flags | self._format_version
2233 header = self._format_flags | self._format_version
2196 header = self.index.pack_header(header)
2234 header = self.index.pack_header(header)
2197 e = header + e
2235 e = header + e
2198 fp.write(e)
2236 fp.write(e)
2199 if self._docket is not None:
2237 if self._docket is not None:
2200 self._docket.index_end = fp.tell()
2238 self._docket.index_end = fp.tell()
2201
2239
2202 # There is a small transactional race here. If the rename of
2240 # There is a small transactional race here. If the rename of
2203 # the index fails, we should remove the datafile. It is more
2241 # the index fails, we should remove the datafile. It is more
2204 # important to ensure that the data file is not truncated
2242 # important to ensure that the data file is not truncated
2205 # when the index is replaced as otherwise data is lost.
2243 # when the index is replaced as otherwise data is lost.
2206 tr.replace(self._datafile, self.start(trindex))
2244 tr.replace(self._datafile, self.start(trindex))
2207
2245
2208 # the temp file replace the real index when we exit the context
2246 # the temp file replace the real index when we exit the context
2209 # manager
2247 # manager
2210
2248
2211 tr.replace(self._indexfile, trindex * self.index.entry_size)
2249 tr.replace(self._indexfile, trindex * self.index.entry_size)
2212 nodemaputil.setup_persistent_nodemap(tr, self)
2250 nodemaputil.setup_persistent_nodemap(tr, self)
2213 self._chunkclear()
2251 self._chunkclear()
2214
2252
2215 if existing_handles:
2253 if existing_handles:
2216 # switched from inline to conventional reopen the index
2254 # switched from inline to conventional reopen the index
2217 ifh = self.__index_write_fp()
2255 ifh = self.__index_write_fp()
2218 self._writinghandles = (ifh, new_dfh)
2256 self._writinghandles = (ifh, new_dfh, None)
2219 new_dfh = None
2257 new_dfh = None
2220 finally:
2258 finally:
2221 if new_dfh is not None:
2259 if new_dfh is not None:
2222 new_dfh.close()
2260 new_dfh.close()
2223
2261
2224 def _nodeduplicatecallback(self, transaction, node):
2262 def _nodeduplicatecallback(self, transaction, node):
2225 """called when trying to add a node already stored."""
2263 """called when trying to add a node already stored."""
2226
2264
2227 @contextlib.contextmanager
2265 @contextlib.contextmanager
2228 def _writing(self, transaction):
2266 def _writing(self, transaction):
2229 if self._trypending:
2267 if self._trypending:
2230 msg = b'try to write in a `trypending` revlog: %s'
2268 msg = b'try to write in a `trypending` revlog: %s'
2231 msg %= self.display_id
2269 msg %= self.display_id
2232 raise error.ProgrammingError(msg)
2270 raise error.ProgrammingError(msg)
2233 if self._writinghandles is not None:
2271 if self._writinghandles is not None:
2234 yield
2272 yield
2235 else:
2273 else:
2236 ifh = dfh = None
2274 ifh = dfh = sdfh = None
2237 try:
2275 try:
2238 r = len(self)
2276 r = len(self)
2239 # opening the data file.
2277 # opening the data file.
2240 dsize = 0
2278 dsize = 0
2241 if r:
2279 if r:
2242 dsize = self.end(r - 1)
2280 dsize = self.end(r - 1)
2243 dfh = None
2281 dfh = None
2244 if not self._inline:
2282 if not self._inline:
2245 try:
2283 try:
2246 dfh = self._datafp(b"r+")
2284 dfh = self._datafp(b"r+")
2247 if self._docket is None:
2285 if self._docket is None:
2248 dfh.seek(0, os.SEEK_END)
2286 dfh.seek(0, os.SEEK_END)
2249 else:
2287 else:
2250 dfh.seek(self._docket.data_end, os.SEEK_SET)
2288 dfh.seek(self._docket.data_end, os.SEEK_SET)
2251 except IOError as inst:
2289 except IOError as inst:
2252 if inst.errno != errno.ENOENT:
2290 if inst.errno != errno.ENOENT:
2253 raise
2291 raise
2254 dfh = self._datafp(b"w+")
2292 dfh = self._datafp(b"w+")
2255 transaction.add(self._datafile, dsize)
2293 transaction.add(self._datafile, dsize)
2294 if self._sidedatafile is not None:
2295 try:
2296 sdfh = self.opener(self._sidedatafile, mode=b"r+")
2297 dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2298 except IOError as inst:
2299 if inst.errno != errno.ENOENT:
2300 raise
2301 sdfh = self.opener(self._sidedatafile, mode=b"w+")
2302 transaction.add(
2303 self._sidedatafile, self._docket.sidedata_end
2304 )
2256
2305
2257 # opening the index file.
2306 # opening the index file.
2258 isize = r * self.index.entry_size
2307 isize = r * self.index.entry_size
2259 ifh = self.__index_write_fp()
2308 ifh = self.__index_write_fp()
2260 if self._inline:
2309 if self._inline:
2261 transaction.add(self._indexfile, dsize + isize)
2310 transaction.add(self._indexfile, dsize + isize)
2262 else:
2311 else:
2263 transaction.add(self._indexfile, isize)
2312 transaction.add(self._indexfile, isize)
2264 # exposing all file handle for writing.
2313 # exposing all file handle for writing.
2265 self._writinghandles = (ifh, dfh)
2314 self._writinghandles = (ifh, dfh, sdfh)
2266 yield
2315 yield
2267 if self._docket is not None:
2316 if self._docket is not None:
2268 self._write_docket(transaction)
2317 self._write_docket(transaction)
2269 finally:
2318 finally:
2270 self._writinghandles = None
2319 self._writinghandles = None
2271 if dfh is not None:
2320 if dfh is not None:
2272 dfh.close()
2321 dfh.close()
2322 if sdfh is not None:
2323 dfh.close()
2273 # closing the index file last to avoid exposing referent to
2324 # closing the index file last to avoid exposing referent to
2274 # potential unflushed data content.
2325 # potential unflushed data content.
2275 if ifh is not None:
2326 if ifh is not None:
2276 ifh.close()
2327 ifh.close()
2277
2328
2278 def _write_docket(self, transaction):
2329 def _write_docket(self, transaction):
2279 """write the current docket on disk
2330 """write the current docket on disk
2280
2331
2281 Exist as a method to help changelog to implement transaction logic
2332 Exist as a method to help changelog to implement transaction logic
2282
2333
2283 We could also imagine using the same transaction logic for all revlog
2334 We could also imagine using the same transaction logic for all revlog
2284 since docket are cheap."""
2335 since docket are cheap."""
2285 self._docket.write(transaction)
2336 self._docket.write(transaction)
2286
2337
2287 def addrevision(
2338 def addrevision(
2288 self,
2339 self,
2289 text,
2340 text,
2290 transaction,
2341 transaction,
2291 link,
2342 link,
2292 p1,
2343 p1,
2293 p2,
2344 p2,
2294 cachedelta=None,
2345 cachedelta=None,
2295 node=None,
2346 node=None,
2296 flags=REVIDX_DEFAULT_FLAGS,
2347 flags=REVIDX_DEFAULT_FLAGS,
2297 deltacomputer=None,
2348 deltacomputer=None,
2298 sidedata=None,
2349 sidedata=None,
2299 ):
2350 ):
2300 """add a revision to the log
2351 """add a revision to the log
2301
2352
2302 text - the revision data to add
2353 text - the revision data to add
2303 transaction - the transaction object used for rollback
2354 transaction - the transaction object used for rollback
2304 link - the linkrev data to add
2355 link - the linkrev data to add
2305 p1, p2 - the parent nodeids of the revision
2356 p1, p2 - the parent nodeids of the revision
2306 cachedelta - an optional precomputed delta
2357 cachedelta - an optional precomputed delta
2307 node - nodeid of revision; typically node is not specified, and it is
2358 node - nodeid of revision; typically node is not specified, and it is
2308 computed by default as hash(text, p1, p2), however subclasses might
2359 computed by default as hash(text, p1, p2), however subclasses might
2309 use different hashing method (and override checkhash() in such case)
2360 use different hashing method (and override checkhash() in such case)
2310 flags - the known flags to set on the revision
2361 flags - the known flags to set on the revision
2311 deltacomputer - an optional deltacomputer instance shared between
2362 deltacomputer - an optional deltacomputer instance shared between
2312 multiple calls
2363 multiple calls
2313 """
2364 """
2314 if link == nullrev:
2365 if link == nullrev:
2315 raise error.RevlogError(
2366 raise error.RevlogError(
2316 _(b"attempted to add linkrev -1 to %s") % self.display_id
2367 _(b"attempted to add linkrev -1 to %s") % self.display_id
2317 )
2368 )
2318
2369
2319 if sidedata is None:
2370 if sidedata is None:
2320 sidedata = {}
2371 sidedata = {}
2321 elif sidedata and not self.hassidedata:
2372 elif sidedata and not self.hassidedata:
2322 raise error.ProgrammingError(
2373 raise error.ProgrammingError(
2323 _(b"trying to add sidedata to a revlog who don't support them")
2374 _(b"trying to add sidedata to a revlog who don't support them")
2324 )
2375 )
2325
2376
2326 if flags:
2377 if flags:
2327 node = node or self.hash(text, p1, p2)
2378 node = node or self.hash(text, p1, p2)
2328
2379
2329 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2380 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2330
2381
2331 # If the flag processor modifies the revision data, ignore any provided
2382 # If the flag processor modifies the revision data, ignore any provided
2332 # cachedelta.
2383 # cachedelta.
2333 if rawtext != text:
2384 if rawtext != text:
2334 cachedelta = None
2385 cachedelta = None
2335
2386
2336 if len(rawtext) > _maxentrysize:
2387 if len(rawtext) > _maxentrysize:
2337 raise error.RevlogError(
2388 raise error.RevlogError(
2338 _(
2389 _(
2339 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2390 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2340 )
2391 )
2341 % (self.display_id, len(rawtext))
2392 % (self.display_id, len(rawtext))
2342 )
2393 )
2343
2394
2344 node = node or self.hash(rawtext, p1, p2)
2395 node = node or self.hash(rawtext, p1, p2)
2345 rev = self.index.get_rev(node)
2396 rev = self.index.get_rev(node)
2346 if rev is not None:
2397 if rev is not None:
2347 return rev
2398 return rev
2348
2399
2349 if validatehash:
2400 if validatehash:
2350 self.checkhash(rawtext, node, p1=p1, p2=p2)
2401 self.checkhash(rawtext, node, p1=p1, p2=p2)
2351
2402
2352 return self.addrawrevision(
2403 return self.addrawrevision(
2353 rawtext,
2404 rawtext,
2354 transaction,
2405 transaction,
2355 link,
2406 link,
2356 p1,
2407 p1,
2357 p2,
2408 p2,
2358 node,
2409 node,
2359 flags,
2410 flags,
2360 cachedelta=cachedelta,
2411 cachedelta=cachedelta,
2361 deltacomputer=deltacomputer,
2412 deltacomputer=deltacomputer,
2362 sidedata=sidedata,
2413 sidedata=sidedata,
2363 )
2414 )
2364
2415
2365 def addrawrevision(
2416 def addrawrevision(
2366 self,
2417 self,
2367 rawtext,
2418 rawtext,
2368 transaction,
2419 transaction,
2369 link,
2420 link,
2370 p1,
2421 p1,
2371 p2,
2422 p2,
2372 node,
2423 node,
2373 flags,
2424 flags,
2374 cachedelta=None,
2425 cachedelta=None,
2375 deltacomputer=None,
2426 deltacomputer=None,
2376 sidedata=None,
2427 sidedata=None,
2377 ):
2428 ):
2378 """add a raw revision with known flags, node and parents
2429 """add a raw revision with known flags, node and parents
2379 useful when reusing a revision not stored in this revlog (ex: received
2430 useful when reusing a revision not stored in this revlog (ex: received
2380 over wire, or read from an external bundle).
2431 over wire, or read from an external bundle).
2381 """
2432 """
2382 with self._writing(transaction):
2433 with self._writing(transaction):
2383 return self._addrevision(
2434 return self._addrevision(
2384 node,
2435 node,
2385 rawtext,
2436 rawtext,
2386 transaction,
2437 transaction,
2387 link,
2438 link,
2388 p1,
2439 p1,
2389 p2,
2440 p2,
2390 flags,
2441 flags,
2391 cachedelta,
2442 cachedelta,
2392 deltacomputer=deltacomputer,
2443 deltacomputer=deltacomputer,
2393 sidedata=sidedata,
2444 sidedata=sidedata,
2394 )
2445 )
2395
2446
2396 def compress(self, data):
2447 def compress(self, data):
2397 """Generate a possibly-compressed representation of data."""
2448 """Generate a possibly-compressed representation of data."""
2398 if not data:
2449 if not data:
2399 return b'', data
2450 return b'', data
2400
2451
2401 compressed = self._compressor.compress(data)
2452 compressed = self._compressor.compress(data)
2402
2453
2403 if compressed:
2454 if compressed:
2404 # The revlog compressor added the header in the returned data.
2455 # The revlog compressor added the header in the returned data.
2405 return b'', compressed
2456 return b'', compressed
2406
2457
2407 if data[0:1] == b'\0':
2458 if data[0:1] == b'\0':
2408 return b'', data
2459 return b'', data
2409 return b'u', data
2460 return b'u', data
2410
2461
2411 def decompress(self, data):
2462 def decompress(self, data):
2412 """Decompress a revlog chunk.
2463 """Decompress a revlog chunk.
2413
2464
2414 The chunk is expected to begin with a header identifying the
2465 The chunk is expected to begin with a header identifying the
2415 format type so it can be routed to an appropriate decompressor.
2466 format type so it can be routed to an appropriate decompressor.
2416 """
2467 """
2417 if not data:
2468 if not data:
2418 return data
2469 return data
2419
2470
2420 # Revlogs are read much more frequently than they are written and many
2471 # Revlogs are read much more frequently than they are written and many
2421 # chunks only take microseconds to decompress, so performance is
2472 # chunks only take microseconds to decompress, so performance is
2422 # important here.
2473 # important here.
2423 #
2474 #
2424 # We can make a few assumptions about revlogs:
2475 # We can make a few assumptions about revlogs:
2425 #
2476 #
2426 # 1) the majority of chunks will be compressed (as opposed to inline
2477 # 1) the majority of chunks will be compressed (as opposed to inline
2427 # raw data).
2478 # raw data).
2428 # 2) decompressing *any* data will likely by at least 10x slower than
2479 # 2) decompressing *any* data will likely by at least 10x slower than
2429 # returning raw inline data.
2480 # returning raw inline data.
2430 # 3) we want to prioritize common and officially supported compression
2481 # 3) we want to prioritize common and officially supported compression
2431 # engines
2482 # engines
2432 #
2483 #
2433 # It follows that we want to optimize for "decompress compressed data
2484 # It follows that we want to optimize for "decompress compressed data
2434 # when encoded with common and officially supported compression engines"
2485 # when encoded with common and officially supported compression engines"
2435 # case over "raw data" and "data encoded by less common or non-official
2486 # case over "raw data" and "data encoded by less common or non-official
2436 # compression engines." That is why we have the inline lookup first
2487 # compression engines." That is why we have the inline lookup first
2437 # followed by the compengines lookup.
2488 # followed by the compengines lookup.
2438 #
2489 #
2439 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2490 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2440 # compressed chunks. And this matters for changelog and manifest reads.
2491 # compressed chunks. And this matters for changelog and manifest reads.
2441 t = data[0:1]
2492 t = data[0:1]
2442
2493
2443 if t == b'x':
2494 if t == b'x':
2444 try:
2495 try:
2445 return _zlibdecompress(data)
2496 return _zlibdecompress(data)
2446 except zlib.error as e:
2497 except zlib.error as e:
2447 raise error.RevlogError(
2498 raise error.RevlogError(
2448 _(b'revlog decompress error: %s')
2499 _(b'revlog decompress error: %s')
2449 % stringutil.forcebytestr(e)
2500 % stringutil.forcebytestr(e)
2450 )
2501 )
2451 # '\0' is more common than 'u' so it goes first.
2502 # '\0' is more common than 'u' so it goes first.
2452 elif t == b'\0':
2503 elif t == b'\0':
2453 return data
2504 return data
2454 elif t == b'u':
2505 elif t == b'u':
2455 return util.buffer(data, 1)
2506 return util.buffer(data, 1)
2456
2507
2457 compressor = self._get_decompressor(t)
2508 compressor = self._get_decompressor(t)
2458
2509
2459 return compressor.decompress(data)
2510 return compressor.decompress(data)
2460
2511
2461 def _addrevision(
2512 def _addrevision(
2462 self,
2513 self,
2463 node,
2514 node,
2464 rawtext,
2515 rawtext,
2465 transaction,
2516 transaction,
2466 link,
2517 link,
2467 p1,
2518 p1,
2468 p2,
2519 p2,
2469 flags,
2520 flags,
2470 cachedelta,
2521 cachedelta,
2471 alwayscache=False,
2522 alwayscache=False,
2472 deltacomputer=None,
2523 deltacomputer=None,
2473 sidedata=None,
2524 sidedata=None,
2474 ):
2525 ):
2475 """internal function to add revisions to the log
2526 """internal function to add revisions to the log
2476
2527
2477 see addrevision for argument descriptions.
2528 see addrevision for argument descriptions.
2478
2529
2479 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2530 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2480
2531
2481 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2532 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2482 be used.
2533 be used.
2483
2534
2484 invariants:
2535 invariants:
2485 - rawtext is optional (can be None); if not set, cachedelta must be set.
2536 - rawtext is optional (can be None); if not set, cachedelta must be set.
2486 if both are set, they must correspond to each other.
2537 if both are set, they must correspond to each other.
2487 """
2538 """
2488 if node == self.nullid:
2539 if node == self.nullid:
2489 raise error.RevlogError(
2540 raise error.RevlogError(
2490 _(b"%s: attempt to add null revision") % self.display_id
2541 _(b"%s: attempt to add null revision") % self.display_id
2491 )
2542 )
2492 if (
2543 if (
2493 node == self.nodeconstants.wdirid
2544 node == self.nodeconstants.wdirid
2494 or node in self.nodeconstants.wdirfilenodeids
2545 or node in self.nodeconstants.wdirfilenodeids
2495 ):
2546 ):
2496 raise error.RevlogError(
2547 raise error.RevlogError(
2497 _(b"%s: attempt to add wdir revision") % self.display_id
2548 _(b"%s: attempt to add wdir revision") % self.display_id
2498 )
2549 )
2499 if self._writinghandles is None:
2550 if self._writinghandles is None:
2500 msg = b'adding revision outside `revlog._writing` context'
2551 msg = b'adding revision outside `revlog._writing` context'
2501 raise error.ProgrammingError(msg)
2552 raise error.ProgrammingError(msg)
2502
2553
2503 if self._inline:
2554 if self._inline:
2504 fh = self._writinghandles[0]
2555 fh = self._writinghandles[0]
2505 else:
2556 else:
2506 fh = self._writinghandles[1]
2557 fh = self._writinghandles[1]
2507
2558
2508 btext = [rawtext]
2559 btext = [rawtext]
2509
2560
2510 curr = len(self)
2561 curr = len(self)
2511 prev = curr - 1
2562 prev = curr - 1
2512
2563
2513 offset = self._get_data_offset(prev)
2564 offset = self._get_data_offset(prev)
2514
2565
2515 if self._concurrencychecker:
2566 if self._concurrencychecker:
2516 ifh, dfh = self._writinghandles
2567 ifh, dfh, sdfh = self._writinghandles
2568 # XXX no checking for the sidedata file
2517 if self._inline:
2569 if self._inline:
2518 # offset is "as if" it were in the .d file, so we need to add on
2570 # offset is "as if" it were in the .d file, so we need to add on
2519 # the size of the entry metadata.
2571 # the size of the entry metadata.
2520 self._concurrencychecker(
2572 self._concurrencychecker(
2521 ifh, self._indexfile, offset + curr * self.index.entry_size
2573 ifh, self._indexfile, offset + curr * self.index.entry_size
2522 )
2574 )
2523 else:
2575 else:
2524 # Entries in the .i are a consistent size.
2576 # Entries in the .i are a consistent size.
2525 self._concurrencychecker(
2577 self._concurrencychecker(
2526 ifh, self._indexfile, curr * self.index.entry_size
2578 ifh, self._indexfile, curr * self.index.entry_size
2527 )
2579 )
2528 self._concurrencychecker(dfh, self._datafile, offset)
2580 self._concurrencychecker(dfh, self._datafile, offset)
2529
2581
2530 p1r, p2r = self.rev(p1), self.rev(p2)
2582 p1r, p2r = self.rev(p1), self.rev(p2)
2531
2583
2532 # full versions are inserted when the needed deltas
2584 # full versions are inserted when the needed deltas
2533 # become comparable to the uncompressed text
2585 # become comparable to the uncompressed text
2534 if rawtext is None:
2586 if rawtext is None:
2535 # need rawtext size, before changed by flag processors, which is
2587 # need rawtext size, before changed by flag processors, which is
2536 # the non-raw size. use revlog explicitly to avoid filelog's extra
2588 # the non-raw size. use revlog explicitly to avoid filelog's extra
2537 # logic that might remove metadata size.
2589 # logic that might remove metadata size.
2538 textlen = mdiff.patchedsize(
2590 textlen = mdiff.patchedsize(
2539 revlog.size(self, cachedelta[0]), cachedelta[1]
2591 revlog.size(self, cachedelta[0]), cachedelta[1]
2540 )
2592 )
2541 else:
2593 else:
2542 textlen = len(rawtext)
2594 textlen = len(rawtext)
2543
2595
2544 if deltacomputer is None:
2596 if deltacomputer is None:
2545 deltacomputer = deltautil.deltacomputer(self)
2597 deltacomputer = deltautil.deltacomputer(self)
2546
2598
2547 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2599 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2548
2600
2549 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2601 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2550
2602
2551 compression_mode = COMP_MODE_INLINE
2603 compression_mode = COMP_MODE_INLINE
2552 if self._docket is not None:
2604 if self._docket is not None:
2553 h, d = deltainfo.data
2605 h, d = deltainfo.data
2554 if not h and not d:
2606 if not h and not d:
2555 # not data to store at all... declare them uncompressed
2607 # not data to store at all... declare them uncompressed
2556 compression_mode = COMP_MODE_PLAIN
2608 compression_mode = COMP_MODE_PLAIN
2557 elif not h:
2609 elif not h:
2558 t = d[0:1]
2610 t = d[0:1]
2559 if t == b'\0':
2611 if t == b'\0':
2560 compression_mode = COMP_MODE_PLAIN
2612 compression_mode = COMP_MODE_PLAIN
2561 elif t == self._docket.default_compression_header:
2613 elif t == self._docket.default_compression_header:
2562 compression_mode = COMP_MODE_DEFAULT
2614 compression_mode = COMP_MODE_DEFAULT
2563 elif h == b'u':
2615 elif h == b'u':
2564 # we have a more efficient way to declare uncompressed
2616 # we have a more efficient way to declare uncompressed
2565 h = b''
2617 h = b''
2566 compression_mode = COMP_MODE_PLAIN
2618 compression_mode = COMP_MODE_PLAIN
2567 deltainfo = deltautil.drop_u_compression(deltainfo)
2619 deltainfo = deltautil.drop_u_compression(deltainfo)
2568
2620
2569 sidedata_compression_mode = COMP_MODE_INLINE
2621 sidedata_compression_mode = COMP_MODE_INLINE
2570 if sidedata and self.hassidedata:
2622 if sidedata and self.hassidedata:
2571 sidedata_compression_mode = COMP_MODE_PLAIN
2623 sidedata_compression_mode = COMP_MODE_PLAIN
2572 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2624 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2573 sidedata_offset = offset + deltainfo.deltalen
2625 sidedata_offset = self._docket.sidedata_end
2574 h, comp_sidedata = self.compress(serialized_sidedata)
2626 h, comp_sidedata = self.compress(serialized_sidedata)
2575 if (
2627 if (
2576 h != b'u'
2628 h != b'u'
2577 and comp_sidedata[0:1] != b'\0'
2629 and comp_sidedata[0:1] != b'\0'
2578 and len(comp_sidedata) < len(serialized_sidedata)
2630 and len(comp_sidedata) < len(serialized_sidedata)
2579 ):
2631 ):
2580 assert not h
2632 assert not h
2581 if (
2633 if (
2582 comp_sidedata[0:1]
2634 comp_sidedata[0:1]
2583 == self._docket.default_compression_header
2635 == self._docket.default_compression_header
2584 ):
2636 ):
2585 sidedata_compression_mode = COMP_MODE_DEFAULT
2637 sidedata_compression_mode = COMP_MODE_DEFAULT
2586 serialized_sidedata = comp_sidedata
2638 serialized_sidedata = comp_sidedata
2587 else:
2639 else:
2588 sidedata_compression_mode = COMP_MODE_INLINE
2640 sidedata_compression_mode = COMP_MODE_INLINE
2589 serialized_sidedata = comp_sidedata
2641 serialized_sidedata = comp_sidedata
2590 else:
2642 else:
2591 serialized_sidedata = b""
2643 serialized_sidedata = b""
2592 # Don't store the offset if the sidedata is empty, that way
2644 # Don't store the offset if the sidedata is empty, that way
2593 # we can easily detect empty sidedata and they will be no different
2645 # we can easily detect empty sidedata and they will be no different
2594 # than ones we manually add.
2646 # than ones we manually add.
2595 sidedata_offset = 0
2647 sidedata_offset = 0
2596
2648
2597 e = (
2649 e = (
2598 offset_type(offset, flags),
2650 offset_type(offset, flags),
2599 deltainfo.deltalen,
2651 deltainfo.deltalen,
2600 textlen,
2652 textlen,
2601 deltainfo.base,
2653 deltainfo.base,
2602 link,
2654 link,
2603 p1r,
2655 p1r,
2604 p2r,
2656 p2r,
2605 node,
2657 node,
2606 sidedata_offset,
2658 sidedata_offset,
2607 len(serialized_sidedata),
2659 len(serialized_sidedata),
2608 compression_mode,
2660 compression_mode,
2609 sidedata_compression_mode,
2661 sidedata_compression_mode,
2610 )
2662 )
2611
2663
2612 self.index.append(e)
2664 self.index.append(e)
2613 entry = self.index.entry_binary(curr)
2665 entry = self.index.entry_binary(curr)
2614 if curr == 0 and self._docket is None:
2666 if curr == 0 and self._docket is None:
2615 header = self._format_flags | self._format_version
2667 header = self._format_flags | self._format_version
2616 header = self.index.pack_header(header)
2668 header = self.index.pack_header(header)
2617 entry = header + entry
2669 entry = header + entry
2618 self._writeentry(
2670 self._writeentry(
2619 transaction,
2671 transaction,
2620 entry,
2672 entry,
2621 deltainfo.data,
2673 deltainfo.data,
2622 link,
2674 link,
2623 offset,
2675 offset,
2624 serialized_sidedata,
2676 serialized_sidedata,
2677 sidedata_offset,
2625 )
2678 )
2626
2679
2627 rawtext = btext[0]
2680 rawtext = btext[0]
2628
2681
2629 if alwayscache and rawtext is None:
2682 if alwayscache and rawtext is None:
2630 rawtext = deltacomputer.buildtext(revinfo, fh)
2683 rawtext = deltacomputer.buildtext(revinfo, fh)
2631
2684
2632 if type(rawtext) == bytes: # only accept immutable objects
2685 if type(rawtext) == bytes: # only accept immutable objects
2633 self._revisioncache = (node, curr, rawtext)
2686 self._revisioncache = (node, curr, rawtext)
2634 self._chainbasecache[curr] = deltainfo.chainbase
2687 self._chainbasecache[curr] = deltainfo.chainbase
2635 return curr
2688 return curr
2636
2689
2637 def _get_data_offset(self, prev):
2690 def _get_data_offset(self, prev):
2638 """Returns the current offset in the (in-transaction) data file.
2691 """Returns the current offset in the (in-transaction) data file.
2639 Versions < 2 of the revlog can get this 0(1), revlog v2 needs a docket
2692 Versions < 2 of the revlog can get this 0(1), revlog v2 needs a docket
2640 file to store that information: since sidedata can be rewritten to the
2693 file to store that information: since sidedata can be rewritten to the
2641 end of the data file within a transaction, you can have cases where, for
2694 end of the data file within a transaction, you can have cases where, for
2642 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2695 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2643 to `n - 1`'s sidedata being written after `n`'s data.
2696 to `n - 1`'s sidedata being written after `n`'s data.
2644
2697
2645 TODO cache this in a docket file before getting out of experimental."""
2698 TODO cache this in a docket file before getting out of experimental."""
2646 if self._docket is None:
2699 if self._docket is None:
2647 return self.end(prev)
2700 return self.end(prev)
2648 else:
2701 else:
2649 return self._docket.data_end
2702 return self._docket.data_end
2650
2703
2651 def _writeentry(self, transaction, entry, data, link, offset, sidedata):
2704 def _writeentry(
2705 self, transaction, entry, data, link, offset, sidedata, sidedata_offset
2706 ):
2652 # Files opened in a+ mode have inconsistent behavior on various
2707 # Files opened in a+ mode have inconsistent behavior on various
2653 # platforms. Windows requires that a file positioning call be made
2708 # platforms. Windows requires that a file positioning call be made
2654 # when the file handle transitions between reads and writes. See
2709 # when the file handle transitions between reads and writes. See
2655 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2710 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2656 # platforms, Python or the platform itself can be buggy. Some versions
2711 # platforms, Python or the platform itself can be buggy. Some versions
2657 # of Solaris have been observed to not append at the end of the file
2712 # of Solaris have been observed to not append at the end of the file
2658 # if the file was seeked to before the end. See issue4943 for more.
2713 # if the file was seeked to before the end. See issue4943 for more.
2659 #
2714 #
2660 # We work around this issue by inserting a seek() before writing.
2715 # We work around this issue by inserting a seek() before writing.
2661 # Note: This is likely not necessary on Python 3. However, because
2716 # Note: This is likely not necessary on Python 3. However, because
2662 # the file handle is reused for reads and may be seeked there, we need
2717 # the file handle is reused for reads and may be seeked there, we need
2663 # to be careful before changing this.
2718 # to be careful before changing this.
2664 if self._writinghandles is None:
2719 if self._writinghandles is None:
2665 msg = b'adding revision outside `revlog._writing` context'
2720 msg = b'adding revision outside `revlog._writing` context'
2666 raise error.ProgrammingError(msg)
2721 raise error.ProgrammingError(msg)
2667 ifh, dfh = self._writinghandles
2722 ifh, dfh, sdfh = self._writinghandles
2668 if self._docket is None:
2723 if self._docket is None:
2669 ifh.seek(0, os.SEEK_END)
2724 ifh.seek(0, os.SEEK_END)
2670 else:
2725 else:
2671 ifh.seek(self._docket.index_end, os.SEEK_SET)
2726 ifh.seek(self._docket.index_end, os.SEEK_SET)
2672 if dfh:
2727 if dfh:
2673 if self._docket is None:
2728 if self._docket is None:
2674 dfh.seek(0, os.SEEK_END)
2729 dfh.seek(0, os.SEEK_END)
2675 else:
2730 else:
2676 dfh.seek(self._docket.data_end, os.SEEK_SET)
2731 dfh.seek(self._docket.data_end, os.SEEK_SET)
2732 if sdfh:
2733 sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2677
2734
2678 curr = len(self) - 1
2735 curr = len(self) - 1
2679 if not self._inline:
2736 if not self._inline:
2680 transaction.add(self._datafile, offset)
2737 transaction.add(self._datafile, offset)
2738 if self._sidedatafile:
2739 transaction.add(self._sidedatafile, sidedata_offset)
2681 transaction.add(self._indexfile, curr * len(entry))
2740 transaction.add(self._indexfile, curr * len(entry))
2682 if data[0]:
2741 if data[0]:
2683 dfh.write(data[0])
2742 dfh.write(data[0])
2684 dfh.write(data[1])
2743 dfh.write(data[1])
2685 if sidedata:
2744 if sidedata:
2686 dfh.write(sidedata)
2745 sdfh.write(sidedata)
2687 ifh.write(entry)
2746 ifh.write(entry)
2688 else:
2747 else:
2689 offset += curr * self.index.entry_size
2748 offset += curr * self.index.entry_size
2690 transaction.add(self._indexfile, offset)
2749 transaction.add(self._indexfile, offset)
2691 ifh.write(entry)
2750 ifh.write(entry)
2692 ifh.write(data[0])
2751 ifh.write(data[0])
2693 ifh.write(data[1])
2752 ifh.write(data[1])
2694 if sidedata:
2753 assert not sidedata
2695 ifh.write(sidedata)
2696 self._enforceinlinesize(transaction)
2754 self._enforceinlinesize(transaction)
2697 if self._docket is not None:
2755 if self._docket is not None:
2698 self._docket.index_end = self._writinghandles[0].tell()
2756 self._docket.index_end = self._writinghandles[0].tell()
2699 self._docket.data_end = self._writinghandles[1].tell()
2757 self._docket.data_end = self._writinghandles[1].tell()
2758 self._docket.sidedata_end = self._writinghandles[2].tell()
2700
2759
2701 nodemaputil.setup_persistent_nodemap(transaction, self)
2760 nodemaputil.setup_persistent_nodemap(transaction, self)
2702
2761
2703 def addgroup(
2762 def addgroup(
2704 self,
2763 self,
2705 deltas,
2764 deltas,
2706 linkmapper,
2765 linkmapper,
2707 transaction,
2766 transaction,
2708 alwayscache=False,
2767 alwayscache=False,
2709 addrevisioncb=None,
2768 addrevisioncb=None,
2710 duplicaterevisioncb=None,
2769 duplicaterevisioncb=None,
2711 ):
2770 ):
2712 """
2771 """
2713 add a delta group
2772 add a delta group
2714
2773
2715 given a set of deltas, add them to the revision log. the
2774 given a set of deltas, add them to the revision log. the
2716 first delta is against its parent, which should be in our
2775 first delta is against its parent, which should be in our
2717 log, the rest are against the previous delta.
2776 log, the rest are against the previous delta.
2718
2777
2719 If ``addrevisioncb`` is defined, it will be called with arguments of
2778 If ``addrevisioncb`` is defined, it will be called with arguments of
2720 this revlog and the node that was added.
2779 this revlog and the node that was added.
2721 """
2780 """
2722
2781
2723 if self._adding_group:
2782 if self._adding_group:
2724 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2783 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2725
2784
2726 self._adding_group = True
2785 self._adding_group = True
2727 empty = True
2786 empty = True
2728 try:
2787 try:
2729 with self._writing(transaction):
2788 with self._writing(transaction):
2730 deltacomputer = deltautil.deltacomputer(self)
2789 deltacomputer = deltautil.deltacomputer(self)
2731 # loop through our set of deltas
2790 # loop through our set of deltas
2732 for data in deltas:
2791 for data in deltas:
2733 (
2792 (
2734 node,
2793 node,
2735 p1,
2794 p1,
2736 p2,
2795 p2,
2737 linknode,
2796 linknode,
2738 deltabase,
2797 deltabase,
2739 delta,
2798 delta,
2740 flags,
2799 flags,
2741 sidedata,
2800 sidedata,
2742 ) = data
2801 ) = data
2743 link = linkmapper(linknode)
2802 link = linkmapper(linknode)
2744 flags = flags or REVIDX_DEFAULT_FLAGS
2803 flags = flags or REVIDX_DEFAULT_FLAGS
2745
2804
2746 rev = self.index.get_rev(node)
2805 rev = self.index.get_rev(node)
2747 if rev is not None:
2806 if rev is not None:
2748 # this can happen if two branches make the same change
2807 # this can happen if two branches make the same change
2749 self._nodeduplicatecallback(transaction, rev)
2808 self._nodeduplicatecallback(transaction, rev)
2750 if duplicaterevisioncb:
2809 if duplicaterevisioncb:
2751 duplicaterevisioncb(self, rev)
2810 duplicaterevisioncb(self, rev)
2752 empty = False
2811 empty = False
2753 continue
2812 continue
2754
2813
2755 for p in (p1, p2):
2814 for p in (p1, p2):
2756 if not self.index.has_node(p):
2815 if not self.index.has_node(p):
2757 raise error.LookupError(
2816 raise error.LookupError(
2758 p, self.radix, _(b'unknown parent')
2817 p, self.radix, _(b'unknown parent')
2759 )
2818 )
2760
2819
2761 if not self.index.has_node(deltabase):
2820 if not self.index.has_node(deltabase):
2762 raise error.LookupError(
2821 raise error.LookupError(
2763 deltabase, self.display_id, _(b'unknown delta base')
2822 deltabase, self.display_id, _(b'unknown delta base')
2764 )
2823 )
2765
2824
2766 baserev = self.rev(deltabase)
2825 baserev = self.rev(deltabase)
2767
2826
2768 if baserev != nullrev and self.iscensored(baserev):
2827 if baserev != nullrev and self.iscensored(baserev):
2769 # if base is censored, delta must be full replacement in a
2828 # if base is censored, delta must be full replacement in a
2770 # single patch operation
2829 # single patch operation
2771 hlen = struct.calcsize(b">lll")
2830 hlen = struct.calcsize(b">lll")
2772 oldlen = self.rawsize(baserev)
2831 oldlen = self.rawsize(baserev)
2773 newlen = len(delta) - hlen
2832 newlen = len(delta) - hlen
2774 if delta[:hlen] != mdiff.replacediffheader(
2833 if delta[:hlen] != mdiff.replacediffheader(
2775 oldlen, newlen
2834 oldlen, newlen
2776 ):
2835 ):
2777 raise error.CensoredBaseError(
2836 raise error.CensoredBaseError(
2778 self.display_id, self.node(baserev)
2837 self.display_id, self.node(baserev)
2779 )
2838 )
2780
2839
2781 if not flags and self._peek_iscensored(baserev, delta):
2840 if not flags and self._peek_iscensored(baserev, delta):
2782 flags |= REVIDX_ISCENSORED
2841 flags |= REVIDX_ISCENSORED
2783
2842
2784 # We assume consumers of addrevisioncb will want to retrieve
2843 # We assume consumers of addrevisioncb will want to retrieve
2785 # the added revision, which will require a call to
2844 # the added revision, which will require a call to
2786 # revision(). revision() will fast path if there is a cache
2845 # revision(). revision() will fast path if there is a cache
2787 # hit. So, we tell _addrevision() to always cache in this case.
2846 # hit. So, we tell _addrevision() to always cache in this case.
2788 # We're only using addgroup() in the context of changegroup
2847 # We're only using addgroup() in the context of changegroup
2789 # generation so the revision data can always be handled as raw
2848 # generation so the revision data can always be handled as raw
2790 # by the flagprocessor.
2849 # by the flagprocessor.
2791 rev = self._addrevision(
2850 rev = self._addrevision(
2792 node,
2851 node,
2793 None,
2852 None,
2794 transaction,
2853 transaction,
2795 link,
2854 link,
2796 p1,
2855 p1,
2797 p2,
2856 p2,
2798 flags,
2857 flags,
2799 (baserev, delta),
2858 (baserev, delta),
2800 alwayscache=alwayscache,
2859 alwayscache=alwayscache,
2801 deltacomputer=deltacomputer,
2860 deltacomputer=deltacomputer,
2802 sidedata=sidedata,
2861 sidedata=sidedata,
2803 )
2862 )
2804
2863
2805 if addrevisioncb:
2864 if addrevisioncb:
2806 addrevisioncb(self, rev)
2865 addrevisioncb(self, rev)
2807 empty = False
2866 empty = False
2808 finally:
2867 finally:
2809 self._adding_group = False
2868 self._adding_group = False
2810 return not empty
2869 return not empty
2811
2870
2812 def iscensored(self, rev):
2871 def iscensored(self, rev):
2813 """Check if a file revision is censored."""
2872 """Check if a file revision is censored."""
2814 if not self._censorable:
2873 if not self._censorable:
2815 return False
2874 return False
2816
2875
2817 return self.flags(rev) & REVIDX_ISCENSORED
2876 return self.flags(rev) & REVIDX_ISCENSORED
2818
2877
2819 def _peek_iscensored(self, baserev, delta):
2878 def _peek_iscensored(self, baserev, delta):
2820 """Quickly check if a delta produces a censored revision."""
2879 """Quickly check if a delta produces a censored revision."""
2821 if not self._censorable:
2880 if not self._censorable:
2822 return False
2881 return False
2823
2882
2824 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2883 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2825
2884
2826 def getstrippoint(self, minlink):
2885 def getstrippoint(self, minlink):
2827 """find the minimum rev that must be stripped to strip the linkrev
2886 """find the minimum rev that must be stripped to strip the linkrev
2828
2887
2829 Returns a tuple containing the minimum rev and a set of all revs that
2888 Returns a tuple containing the minimum rev and a set of all revs that
2830 have linkrevs that will be broken by this strip.
2889 have linkrevs that will be broken by this strip.
2831 """
2890 """
2832 return storageutil.resolvestripinfo(
2891 return storageutil.resolvestripinfo(
2833 minlink,
2892 minlink,
2834 len(self) - 1,
2893 len(self) - 1,
2835 self.headrevs(),
2894 self.headrevs(),
2836 self.linkrev,
2895 self.linkrev,
2837 self.parentrevs,
2896 self.parentrevs,
2838 )
2897 )
2839
2898
2840 def strip(self, minlink, transaction):
2899 def strip(self, minlink, transaction):
2841 """truncate the revlog on the first revision with a linkrev >= minlink
2900 """truncate the revlog on the first revision with a linkrev >= minlink
2842
2901
2843 This function is called when we're stripping revision minlink and
2902 This function is called when we're stripping revision minlink and
2844 its descendants from the repository.
2903 its descendants from the repository.
2845
2904
2846 We have to remove all revisions with linkrev >= minlink, because
2905 We have to remove all revisions with linkrev >= minlink, because
2847 the equivalent changelog revisions will be renumbered after the
2906 the equivalent changelog revisions will be renumbered after the
2848 strip.
2907 strip.
2849
2908
2850 So we truncate the revlog on the first of these revisions, and
2909 So we truncate the revlog on the first of these revisions, and
2851 trust that the caller has saved the revisions that shouldn't be
2910 trust that the caller has saved the revisions that shouldn't be
2852 removed and that it'll re-add them after this truncation.
2911 removed and that it'll re-add them after this truncation.
2853 """
2912 """
2854 if len(self) == 0:
2913 if len(self) == 0:
2855 return
2914 return
2856
2915
2857 rev, _ = self.getstrippoint(minlink)
2916 rev, _ = self.getstrippoint(minlink)
2858 if rev == len(self):
2917 if rev == len(self):
2859 return
2918 return
2860
2919
2861 # first truncate the files on disk
2920 # first truncate the files on disk
2862 data_end = self.start(rev)
2921 data_end = self.start(rev)
2863 if not self._inline:
2922 if not self._inline:
2864 transaction.add(self._datafile, data_end)
2923 transaction.add(self._datafile, data_end)
2865 end = rev * self.index.entry_size
2924 end = rev * self.index.entry_size
2866 else:
2925 else:
2867 end = data_end + (rev * self.index.entry_size)
2926 end = data_end + (rev * self.index.entry_size)
2868
2927
2928 if self._sidedatafile:
2929 sidedata_end = self.sidedata_cut_off(rev)
2930 transaction.add(self._sidedatafile, sidedata_end)
2931
2869 transaction.add(self._indexfile, end)
2932 transaction.add(self._indexfile, end)
2870 if self._docket is not None:
2933 if self._docket is not None:
2871 # XXX we could, leverage the docket while stripping. However it is
2934 # XXX we could, leverage the docket while stripping. However it is
2872 # not powerfull enough at the time of this comment
2935 # not powerfull enough at the time of this comment
2873 self._docket.index_end = end
2936 self._docket.index_end = end
2874 self._docket.data_end = data_end
2937 self._docket.data_end = data_end
2938 self._docket.sidedata_end = sidedata_end
2875 self._docket.write(transaction, stripping=True)
2939 self._docket.write(transaction, stripping=True)
2876
2940
2877 # then reset internal state in memory to forget those revisions
2941 # then reset internal state in memory to forget those revisions
2878 self._revisioncache = None
2942 self._revisioncache = None
2879 self._chaininfocache = util.lrucachedict(500)
2943 self._chaininfocache = util.lrucachedict(500)
2880 self._chunkclear()
2944 self._chunkclear()
2881
2945
2882 del self.index[rev:-1]
2946 del self.index[rev:-1]
2883
2947
2884 def checksize(self):
2948 def checksize(self):
2885 """Check size of index and data files
2949 """Check size of index and data files
2886
2950
2887 return a (dd, di) tuple.
2951 return a (dd, di) tuple.
2888 - dd: extra bytes for the "data" file
2952 - dd: extra bytes for the "data" file
2889 - di: extra bytes for the "index" file
2953 - di: extra bytes for the "index" file
2890
2954
2891 A healthy revlog will return (0, 0).
2955 A healthy revlog will return (0, 0).
2892 """
2956 """
2893 expected = 0
2957 expected = 0
2894 if len(self):
2958 if len(self):
2895 expected = max(0, self.end(len(self) - 1))
2959 expected = max(0, self.end(len(self) - 1))
2896
2960
2897 try:
2961 try:
2898 with self._datafp() as f:
2962 with self._datafp() as f:
2899 f.seek(0, io.SEEK_END)
2963 f.seek(0, io.SEEK_END)
2900 actual = f.tell()
2964 actual = f.tell()
2901 dd = actual - expected
2965 dd = actual - expected
2902 except IOError as inst:
2966 except IOError as inst:
2903 if inst.errno != errno.ENOENT:
2967 if inst.errno != errno.ENOENT:
2904 raise
2968 raise
2905 dd = 0
2969 dd = 0
2906
2970
2907 try:
2971 try:
2908 f = self.opener(self._indexfile)
2972 f = self.opener(self._indexfile)
2909 f.seek(0, io.SEEK_END)
2973 f.seek(0, io.SEEK_END)
2910 actual = f.tell()
2974 actual = f.tell()
2911 f.close()
2975 f.close()
2912 s = self.index.entry_size
2976 s = self.index.entry_size
2913 i = max(0, actual // s)
2977 i = max(0, actual // s)
2914 di = actual - (i * s)
2978 di = actual - (i * s)
2915 if self._inline:
2979 if self._inline:
2916 databytes = 0
2980 databytes = 0
2917 for r in self:
2981 for r in self:
2918 databytes += max(0, self.length(r))
2982 databytes += max(0, self.length(r))
2919 dd = 0
2983 dd = 0
2920 di = actual - len(self) * s - databytes
2984 di = actual - len(self) * s - databytes
2921 except IOError as inst:
2985 except IOError as inst:
2922 if inst.errno != errno.ENOENT:
2986 if inst.errno != errno.ENOENT:
2923 raise
2987 raise
2924 di = 0
2988 di = 0
2925
2989
2926 return (dd, di)
2990 return (dd, di)
2927
2991
2928 def files(self):
2992 def files(self):
2929 res = [self._indexfile]
2993 res = [self._indexfile]
2930 if not self._inline:
2994 if not self._inline:
2931 res.append(self._datafile)
2995 res.append(self._datafile)
2932 return res
2996 return res
2933
2997
2934 def emitrevisions(
2998 def emitrevisions(
2935 self,
2999 self,
2936 nodes,
3000 nodes,
2937 nodesorder=None,
3001 nodesorder=None,
2938 revisiondata=False,
3002 revisiondata=False,
2939 assumehaveparentrevisions=False,
3003 assumehaveparentrevisions=False,
2940 deltamode=repository.CG_DELTAMODE_STD,
3004 deltamode=repository.CG_DELTAMODE_STD,
2941 sidedata_helpers=None,
3005 sidedata_helpers=None,
2942 ):
3006 ):
2943 if nodesorder not in (b'nodes', b'storage', b'linear', None):
3007 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2944 raise error.ProgrammingError(
3008 raise error.ProgrammingError(
2945 b'unhandled value for nodesorder: %s' % nodesorder
3009 b'unhandled value for nodesorder: %s' % nodesorder
2946 )
3010 )
2947
3011
2948 if nodesorder is None and not self._generaldelta:
3012 if nodesorder is None and not self._generaldelta:
2949 nodesorder = b'storage'
3013 nodesorder = b'storage'
2950
3014
2951 if (
3015 if (
2952 not self._storedeltachains
3016 not self._storedeltachains
2953 and deltamode != repository.CG_DELTAMODE_PREV
3017 and deltamode != repository.CG_DELTAMODE_PREV
2954 ):
3018 ):
2955 deltamode = repository.CG_DELTAMODE_FULL
3019 deltamode = repository.CG_DELTAMODE_FULL
2956
3020
2957 return storageutil.emitrevisions(
3021 return storageutil.emitrevisions(
2958 self,
3022 self,
2959 nodes,
3023 nodes,
2960 nodesorder,
3024 nodesorder,
2961 revlogrevisiondelta,
3025 revlogrevisiondelta,
2962 deltaparentfn=self.deltaparent,
3026 deltaparentfn=self.deltaparent,
2963 candeltafn=self.candelta,
3027 candeltafn=self.candelta,
2964 rawsizefn=self.rawsize,
3028 rawsizefn=self.rawsize,
2965 revdifffn=self.revdiff,
3029 revdifffn=self.revdiff,
2966 flagsfn=self.flags,
3030 flagsfn=self.flags,
2967 deltamode=deltamode,
3031 deltamode=deltamode,
2968 revisiondata=revisiondata,
3032 revisiondata=revisiondata,
2969 assumehaveparentrevisions=assumehaveparentrevisions,
3033 assumehaveparentrevisions=assumehaveparentrevisions,
2970 sidedata_helpers=sidedata_helpers,
3034 sidedata_helpers=sidedata_helpers,
2971 )
3035 )
2972
3036
2973 DELTAREUSEALWAYS = b'always'
3037 DELTAREUSEALWAYS = b'always'
2974 DELTAREUSESAMEREVS = b'samerevs'
3038 DELTAREUSESAMEREVS = b'samerevs'
2975 DELTAREUSENEVER = b'never'
3039 DELTAREUSENEVER = b'never'
2976
3040
2977 DELTAREUSEFULLADD = b'fulladd'
3041 DELTAREUSEFULLADD = b'fulladd'
2978
3042
2979 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
3043 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2980
3044
2981 def clone(
3045 def clone(
2982 self,
3046 self,
2983 tr,
3047 tr,
2984 destrevlog,
3048 destrevlog,
2985 addrevisioncb=None,
3049 addrevisioncb=None,
2986 deltareuse=DELTAREUSESAMEREVS,
3050 deltareuse=DELTAREUSESAMEREVS,
2987 forcedeltabothparents=None,
3051 forcedeltabothparents=None,
2988 sidedata_helpers=None,
3052 sidedata_helpers=None,
2989 ):
3053 ):
2990 """Copy this revlog to another, possibly with format changes.
3054 """Copy this revlog to another, possibly with format changes.
2991
3055
2992 The destination revlog will contain the same revisions and nodes.
3056 The destination revlog will contain the same revisions and nodes.
2993 However, it may not be bit-for-bit identical due to e.g. delta encoding
3057 However, it may not be bit-for-bit identical due to e.g. delta encoding
2994 differences.
3058 differences.
2995
3059
2996 The ``deltareuse`` argument control how deltas from the existing revlog
3060 The ``deltareuse`` argument control how deltas from the existing revlog
2997 are preserved in the destination revlog. The argument can have the
3061 are preserved in the destination revlog. The argument can have the
2998 following values:
3062 following values:
2999
3063
3000 DELTAREUSEALWAYS
3064 DELTAREUSEALWAYS
3001 Deltas will always be reused (if possible), even if the destination
3065 Deltas will always be reused (if possible), even if the destination
3002 revlog would not select the same revisions for the delta. This is the
3066 revlog would not select the same revisions for the delta. This is the
3003 fastest mode of operation.
3067 fastest mode of operation.
3004 DELTAREUSESAMEREVS
3068 DELTAREUSESAMEREVS
3005 Deltas will be reused if the destination revlog would pick the same
3069 Deltas will be reused if the destination revlog would pick the same
3006 revisions for the delta. This mode strikes a balance between speed
3070 revisions for the delta. This mode strikes a balance between speed
3007 and optimization.
3071 and optimization.
3008 DELTAREUSENEVER
3072 DELTAREUSENEVER
3009 Deltas will never be reused. This is the slowest mode of execution.
3073 Deltas will never be reused. This is the slowest mode of execution.
3010 This mode can be used to recompute deltas (e.g. if the diff/delta
3074 This mode can be used to recompute deltas (e.g. if the diff/delta
3011 algorithm changes).
3075 algorithm changes).
3012 DELTAREUSEFULLADD
3076 DELTAREUSEFULLADD
3013 Revision will be re-added as if their were new content. This is
3077 Revision will be re-added as if their were new content. This is
3014 slower than DELTAREUSEALWAYS but allow more mechanism to kicks in.
3078 slower than DELTAREUSEALWAYS but allow more mechanism to kicks in.
3015 eg: large file detection and handling.
3079 eg: large file detection and handling.
3016
3080
3017 Delta computation can be slow, so the choice of delta reuse policy can
3081 Delta computation can be slow, so the choice of delta reuse policy can
3018 significantly affect run time.
3082 significantly affect run time.
3019
3083
3020 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
3084 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
3021 two extremes. Deltas will be reused if they are appropriate. But if the
3085 two extremes. Deltas will be reused if they are appropriate. But if the
3022 delta could choose a better revision, it will do so. This means if you
3086 delta could choose a better revision, it will do so. This means if you
3023 are converting a non-generaldelta revlog to a generaldelta revlog,
3087 are converting a non-generaldelta revlog to a generaldelta revlog,
3024 deltas will be recomputed if the delta's parent isn't a parent of the
3088 deltas will be recomputed if the delta's parent isn't a parent of the
3025 revision.
3089 revision.
3026
3090
3027 In addition to the delta policy, the ``forcedeltabothparents``
3091 In addition to the delta policy, the ``forcedeltabothparents``
3028 argument controls whether to force compute deltas against both parents
3092 argument controls whether to force compute deltas against both parents
3029 for merges. By default, the current default is used.
3093 for merges. By default, the current default is used.
3030
3094
3031 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
3095 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
3032 `sidedata_helpers`.
3096 `sidedata_helpers`.
3033 """
3097 """
3034 if deltareuse not in self.DELTAREUSEALL:
3098 if deltareuse not in self.DELTAREUSEALL:
3035 raise ValueError(
3099 raise ValueError(
3036 _(b'value for deltareuse invalid: %s') % deltareuse
3100 _(b'value for deltareuse invalid: %s') % deltareuse
3037 )
3101 )
3038
3102
3039 if len(destrevlog):
3103 if len(destrevlog):
3040 raise ValueError(_(b'destination revlog is not empty'))
3104 raise ValueError(_(b'destination revlog is not empty'))
3041
3105
3042 if getattr(self, 'filteredrevs', None):
3106 if getattr(self, 'filteredrevs', None):
3043 raise ValueError(_(b'source revlog has filtered revisions'))
3107 raise ValueError(_(b'source revlog has filtered revisions'))
3044 if getattr(destrevlog, 'filteredrevs', None):
3108 if getattr(destrevlog, 'filteredrevs', None):
3045 raise ValueError(_(b'destination revlog has filtered revisions'))
3109 raise ValueError(_(b'destination revlog has filtered revisions'))
3046
3110
3047 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
3111 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
3048 # if possible.
3112 # if possible.
3049 oldlazydelta = destrevlog._lazydelta
3113 oldlazydelta = destrevlog._lazydelta
3050 oldlazydeltabase = destrevlog._lazydeltabase
3114 oldlazydeltabase = destrevlog._lazydeltabase
3051 oldamd = destrevlog._deltabothparents
3115 oldamd = destrevlog._deltabothparents
3052
3116
3053 try:
3117 try:
3054 if deltareuse == self.DELTAREUSEALWAYS:
3118 if deltareuse == self.DELTAREUSEALWAYS:
3055 destrevlog._lazydeltabase = True
3119 destrevlog._lazydeltabase = True
3056 destrevlog._lazydelta = True
3120 destrevlog._lazydelta = True
3057 elif deltareuse == self.DELTAREUSESAMEREVS:
3121 elif deltareuse == self.DELTAREUSESAMEREVS:
3058 destrevlog._lazydeltabase = False
3122 destrevlog._lazydeltabase = False
3059 destrevlog._lazydelta = True
3123 destrevlog._lazydelta = True
3060 elif deltareuse == self.DELTAREUSENEVER:
3124 elif deltareuse == self.DELTAREUSENEVER:
3061 destrevlog._lazydeltabase = False
3125 destrevlog._lazydeltabase = False
3062 destrevlog._lazydelta = False
3126 destrevlog._lazydelta = False
3063
3127
3064 destrevlog._deltabothparents = forcedeltabothparents or oldamd
3128 destrevlog._deltabothparents = forcedeltabothparents or oldamd
3065
3129
3066 self._clone(
3130 self._clone(
3067 tr,
3131 tr,
3068 destrevlog,
3132 destrevlog,
3069 addrevisioncb,
3133 addrevisioncb,
3070 deltareuse,
3134 deltareuse,
3071 forcedeltabothparents,
3135 forcedeltabothparents,
3072 sidedata_helpers,
3136 sidedata_helpers,
3073 )
3137 )
3074
3138
3075 finally:
3139 finally:
3076 destrevlog._lazydelta = oldlazydelta
3140 destrevlog._lazydelta = oldlazydelta
3077 destrevlog._lazydeltabase = oldlazydeltabase
3141 destrevlog._lazydeltabase = oldlazydeltabase
3078 destrevlog._deltabothparents = oldamd
3142 destrevlog._deltabothparents = oldamd
3079
3143
3080 def _clone(
3144 def _clone(
3081 self,
3145 self,
3082 tr,
3146 tr,
3083 destrevlog,
3147 destrevlog,
3084 addrevisioncb,
3148 addrevisioncb,
3085 deltareuse,
3149 deltareuse,
3086 forcedeltabothparents,
3150 forcedeltabothparents,
3087 sidedata_helpers,
3151 sidedata_helpers,
3088 ):
3152 ):
3089 """perform the core duty of `revlog.clone` after parameter processing"""
3153 """perform the core duty of `revlog.clone` after parameter processing"""
3090 deltacomputer = deltautil.deltacomputer(destrevlog)
3154 deltacomputer = deltautil.deltacomputer(destrevlog)
3091 index = self.index
3155 index = self.index
3092 for rev in self:
3156 for rev in self:
3093 entry = index[rev]
3157 entry = index[rev]
3094
3158
3095 # Some classes override linkrev to take filtered revs into
3159 # Some classes override linkrev to take filtered revs into
3096 # account. Use raw entry from index.
3160 # account. Use raw entry from index.
3097 flags = entry[0] & 0xFFFF
3161 flags = entry[0] & 0xFFFF
3098 linkrev = entry[4]
3162 linkrev = entry[4]
3099 p1 = index[entry[5]][7]
3163 p1 = index[entry[5]][7]
3100 p2 = index[entry[6]][7]
3164 p2 = index[entry[6]][7]
3101 node = entry[7]
3165 node = entry[7]
3102
3166
3103 # (Possibly) reuse the delta from the revlog if allowed and
3167 # (Possibly) reuse the delta from the revlog if allowed and
3104 # the revlog chunk is a delta.
3168 # the revlog chunk is a delta.
3105 cachedelta = None
3169 cachedelta = None
3106 rawtext = None
3170 rawtext = None
3107 if deltareuse == self.DELTAREUSEFULLADD:
3171 if deltareuse == self.DELTAREUSEFULLADD:
3108 text = self._revisiondata(rev)
3172 text = self._revisiondata(rev)
3109 sidedata = self.sidedata(rev)
3173 sidedata = self.sidedata(rev)
3110
3174
3111 if sidedata_helpers is not None:
3175 if sidedata_helpers is not None:
3112 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3176 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3113 self, sidedata_helpers, sidedata, rev
3177 self, sidedata_helpers, sidedata, rev
3114 )
3178 )
3115 flags = flags | new_flags[0] & ~new_flags[1]
3179 flags = flags | new_flags[0] & ~new_flags[1]
3116
3180
3117 destrevlog.addrevision(
3181 destrevlog.addrevision(
3118 text,
3182 text,
3119 tr,
3183 tr,
3120 linkrev,
3184 linkrev,
3121 p1,
3185 p1,
3122 p2,
3186 p2,
3123 cachedelta=cachedelta,
3187 cachedelta=cachedelta,
3124 node=node,
3188 node=node,
3125 flags=flags,
3189 flags=flags,
3126 deltacomputer=deltacomputer,
3190 deltacomputer=deltacomputer,
3127 sidedata=sidedata,
3191 sidedata=sidedata,
3128 )
3192 )
3129 else:
3193 else:
3130 if destrevlog._lazydelta:
3194 if destrevlog._lazydelta:
3131 dp = self.deltaparent(rev)
3195 dp = self.deltaparent(rev)
3132 if dp != nullrev:
3196 if dp != nullrev:
3133 cachedelta = (dp, bytes(self._chunk(rev)))
3197 cachedelta = (dp, bytes(self._chunk(rev)))
3134
3198
3135 sidedata = None
3199 sidedata = None
3136 if not cachedelta:
3200 if not cachedelta:
3137 rawtext = self._revisiondata(rev)
3201 rawtext = self._revisiondata(rev)
3138 sidedata = self.sidedata(rev)
3202 sidedata = self.sidedata(rev)
3139 if sidedata is None:
3203 if sidedata is None:
3140 sidedata = self.sidedata(rev)
3204 sidedata = self.sidedata(rev)
3141
3205
3142 if sidedata_helpers is not None:
3206 if sidedata_helpers is not None:
3143 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3207 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3144 self, sidedata_helpers, sidedata, rev
3208 self, sidedata_helpers, sidedata, rev
3145 )
3209 )
3146 flags = flags | new_flags[0] & ~new_flags[1]
3210 flags = flags | new_flags[0] & ~new_flags[1]
3147
3211
3148 with destrevlog._writing(tr):
3212 with destrevlog._writing(tr):
3149 destrevlog._addrevision(
3213 destrevlog._addrevision(
3150 node,
3214 node,
3151 rawtext,
3215 rawtext,
3152 tr,
3216 tr,
3153 linkrev,
3217 linkrev,
3154 p1,
3218 p1,
3155 p2,
3219 p2,
3156 flags,
3220 flags,
3157 cachedelta,
3221 cachedelta,
3158 deltacomputer=deltacomputer,
3222 deltacomputer=deltacomputer,
3159 sidedata=sidedata,
3223 sidedata=sidedata,
3160 )
3224 )
3161
3225
3162 if addrevisioncb:
3226 if addrevisioncb:
3163 addrevisioncb(self, rev, node)
3227 addrevisioncb(self, rev, node)
3164
3228
3165 def censorrevision(self, tr, censornode, tombstone=b''):
3229 def censorrevision(self, tr, censornode, tombstone=b''):
3166 if self._format_version == REVLOGV0:
3230 if self._format_version == REVLOGV0:
3167 raise error.RevlogError(
3231 raise error.RevlogError(
3168 _(b'cannot censor with version %d revlogs')
3232 _(b'cannot censor with version %d revlogs')
3169 % self._format_version
3233 % self._format_version
3170 )
3234 )
3171
3235
3172 censorrev = self.rev(censornode)
3236 censorrev = self.rev(censornode)
3173 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
3237 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
3174
3238
3175 if len(tombstone) > self.rawsize(censorrev):
3239 if len(tombstone) > self.rawsize(censorrev):
3176 raise error.Abort(
3240 raise error.Abort(
3177 _(b'censor tombstone must be no longer than censored data')
3241 _(b'censor tombstone must be no longer than censored data')
3178 )
3242 )
3179
3243
3180 # Rewriting the revlog in place is hard. Our strategy for censoring is
3244 # Rewriting the revlog in place is hard. Our strategy for censoring is
3181 # to create a new revlog, copy all revisions to it, then replace the
3245 # to create a new revlog, copy all revisions to it, then replace the
3182 # revlogs on transaction close.
3246 # revlogs on transaction close.
3183 #
3247 #
3184 # This is a bit dangerous. We could easily have a mismatch of state.
3248 # This is a bit dangerous. We could easily have a mismatch of state.
3185 newrl = revlog(
3249 newrl = revlog(
3186 self.opener,
3250 self.opener,
3187 target=self.target,
3251 target=self.target,
3188 radix=self.radix,
3252 radix=self.radix,
3189 postfix=b'tmpcensored',
3253 postfix=b'tmpcensored',
3190 censorable=True,
3254 censorable=True,
3191 )
3255 )
3192 newrl._format_version = self._format_version
3256 newrl._format_version = self._format_version
3193 newrl._format_flags = self._format_flags
3257 newrl._format_flags = self._format_flags
3194 newrl._generaldelta = self._generaldelta
3258 newrl._generaldelta = self._generaldelta
3195 newrl._parse_index = self._parse_index
3259 newrl._parse_index = self._parse_index
3196
3260
3197 for rev in self.revs():
3261 for rev in self.revs():
3198 node = self.node(rev)
3262 node = self.node(rev)
3199 p1, p2 = self.parents(node)
3263 p1, p2 = self.parents(node)
3200
3264
3201 if rev == censorrev:
3265 if rev == censorrev:
3202 newrl.addrawrevision(
3266 newrl.addrawrevision(
3203 tombstone,
3267 tombstone,
3204 tr,
3268 tr,
3205 self.linkrev(censorrev),
3269 self.linkrev(censorrev),
3206 p1,
3270 p1,
3207 p2,
3271 p2,
3208 censornode,
3272 censornode,
3209 REVIDX_ISCENSORED,
3273 REVIDX_ISCENSORED,
3210 )
3274 )
3211
3275
3212 if newrl.deltaparent(rev) != nullrev:
3276 if newrl.deltaparent(rev) != nullrev:
3213 raise error.Abort(
3277 raise error.Abort(
3214 _(
3278 _(
3215 b'censored revision stored as delta; '
3279 b'censored revision stored as delta; '
3216 b'cannot censor'
3280 b'cannot censor'
3217 ),
3281 ),
3218 hint=_(
3282 hint=_(
3219 b'censoring of revlogs is not '
3283 b'censoring of revlogs is not '
3220 b'fully implemented; please report '
3284 b'fully implemented; please report '
3221 b'this bug'
3285 b'this bug'
3222 ),
3286 ),
3223 )
3287 )
3224 continue
3288 continue
3225
3289
3226 if self.iscensored(rev):
3290 if self.iscensored(rev):
3227 if self.deltaparent(rev) != nullrev:
3291 if self.deltaparent(rev) != nullrev:
3228 raise error.Abort(
3292 raise error.Abort(
3229 _(
3293 _(
3230 b'cannot censor due to censored '
3294 b'cannot censor due to censored '
3231 b'revision having delta stored'
3295 b'revision having delta stored'
3232 )
3296 )
3233 )
3297 )
3234 rawtext = self._chunk(rev)
3298 rawtext = self._chunk(rev)
3235 else:
3299 else:
3236 rawtext = self.rawdata(rev)
3300 rawtext = self.rawdata(rev)
3237
3301
3238 newrl.addrawrevision(
3302 newrl.addrawrevision(
3239 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
3303 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
3240 )
3304 )
3241
3305
3242 tr.addbackup(self._indexfile, location=b'store')
3306 tr.addbackup(self._indexfile, location=b'store')
3243 if not self._inline:
3307 if not self._inline:
3244 tr.addbackup(self._datafile, location=b'store')
3308 tr.addbackup(self._datafile, location=b'store')
3245
3309
3246 self.opener.rename(newrl._indexfile, self._indexfile)
3310 self.opener.rename(newrl._indexfile, self._indexfile)
3247 if not self._inline:
3311 if not self._inline:
3248 self.opener.rename(newrl._datafile, self._datafile)
3312 self.opener.rename(newrl._datafile, self._datafile)
3249
3313
3250 self.clearcaches()
3314 self.clearcaches()
3251 self._loadindex()
3315 self._loadindex()
3252
3316
3253 def verifyintegrity(self, state):
3317 def verifyintegrity(self, state):
3254 """Verifies the integrity of the revlog.
3318 """Verifies the integrity of the revlog.
3255
3319
3256 Yields ``revlogproblem`` instances describing problems that are
3320 Yields ``revlogproblem`` instances describing problems that are
3257 found.
3321 found.
3258 """
3322 """
3259 dd, di = self.checksize()
3323 dd, di = self.checksize()
3260 if dd:
3324 if dd:
3261 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3325 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3262 if di:
3326 if di:
3263 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3327 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3264
3328
3265 version = self._format_version
3329 version = self._format_version
3266
3330
3267 # The verifier tells us what version revlog we should be.
3331 # The verifier tells us what version revlog we should be.
3268 if version != state[b'expectedversion']:
3332 if version != state[b'expectedversion']:
3269 yield revlogproblem(
3333 yield revlogproblem(
3270 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3334 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3271 % (self.display_id, version, state[b'expectedversion'])
3335 % (self.display_id, version, state[b'expectedversion'])
3272 )
3336 )
3273
3337
3274 state[b'skipread'] = set()
3338 state[b'skipread'] = set()
3275 state[b'safe_renamed'] = set()
3339 state[b'safe_renamed'] = set()
3276
3340
3277 for rev in self:
3341 for rev in self:
3278 node = self.node(rev)
3342 node = self.node(rev)
3279
3343
3280 # Verify contents. 4 cases to care about:
3344 # Verify contents. 4 cases to care about:
3281 #
3345 #
3282 # common: the most common case
3346 # common: the most common case
3283 # rename: with a rename
3347 # rename: with a rename
3284 # meta: file content starts with b'\1\n', the metadata
3348 # meta: file content starts with b'\1\n', the metadata
3285 # header defined in filelog.py, but without a rename
3349 # header defined in filelog.py, but without a rename
3286 # ext: content stored externally
3350 # ext: content stored externally
3287 #
3351 #
3288 # More formally, their differences are shown below:
3352 # More formally, their differences are shown below:
3289 #
3353 #
3290 # | common | rename | meta | ext
3354 # | common | rename | meta | ext
3291 # -------------------------------------------------------
3355 # -------------------------------------------------------
3292 # flags() | 0 | 0 | 0 | not 0
3356 # flags() | 0 | 0 | 0 | not 0
3293 # renamed() | False | True | False | ?
3357 # renamed() | False | True | False | ?
3294 # rawtext[0:2]=='\1\n'| False | True | True | ?
3358 # rawtext[0:2]=='\1\n'| False | True | True | ?
3295 #
3359 #
3296 # "rawtext" means the raw text stored in revlog data, which
3360 # "rawtext" means the raw text stored in revlog data, which
3297 # could be retrieved by "rawdata(rev)". "text"
3361 # could be retrieved by "rawdata(rev)". "text"
3298 # mentioned below is "revision(rev)".
3362 # mentioned below is "revision(rev)".
3299 #
3363 #
3300 # There are 3 different lengths stored physically:
3364 # There are 3 different lengths stored physically:
3301 # 1. L1: rawsize, stored in revlog index
3365 # 1. L1: rawsize, stored in revlog index
3302 # 2. L2: len(rawtext), stored in revlog data
3366 # 2. L2: len(rawtext), stored in revlog data
3303 # 3. L3: len(text), stored in revlog data if flags==0, or
3367 # 3. L3: len(text), stored in revlog data if flags==0, or
3304 # possibly somewhere else if flags!=0
3368 # possibly somewhere else if flags!=0
3305 #
3369 #
3306 # L1 should be equal to L2. L3 could be different from them.
3370 # L1 should be equal to L2. L3 could be different from them.
3307 # "text" may or may not affect commit hash depending on flag
3371 # "text" may or may not affect commit hash depending on flag
3308 # processors (see flagutil.addflagprocessor).
3372 # processors (see flagutil.addflagprocessor).
3309 #
3373 #
3310 # | common | rename | meta | ext
3374 # | common | rename | meta | ext
3311 # -------------------------------------------------
3375 # -------------------------------------------------
3312 # rawsize() | L1 | L1 | L1 | L1
3376 # rawsize() | L1 | L1 | L1 | L1
3313 # size() | L1 | L2-LM | L1(*) | L1 (?)
3377 # size() | L1 | L2-LM | L1(*) | L1 (?)
3314 # len(rawtext) | L2 | L2 | L2 | L2
3378 # len(rawtext) | L2 | L2 | L2 | L2
3315 # len(text) | L2 | L2 | L2 | L3
3379 # len(text) | L2 | L2 | L2 | L3
3316 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3380 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3317 #
3381 #
3318 # LM: length of metadata, depending on rawtext
3382 # LM: length of metadata, depending on rawtext
3319 # (*): not ideal, see comment in filelog.size
3383 # (*): not ideal, see comment in filelog.size
3320 # (?): could be "- len(meta)" if the resolved content has
3384 # (?): could be "- len(meta)" if the resolved content has
3321 # rename metadata
3385 # rename metadata
3322 #
3386 #
3323 # Checks needed to be done:
3387 # Checks needed to be done:
3324 # 1. length check: L1 == L2, in all cases.
3388 # 1. length check: L1 == L2, in all cases.
3325 # 2. hash check: depending on flag processor, we may need to
3389 # 2. hash check: depending on flag processor, we may need to
3326 # use either "text" (external), or "rawtext" (in revlog).
3390 # use either "text" (external), or "rawtext" (in revlog).
3327
3391
3328 try:
3392 try:
3329 skipflags = state.get(b'skipflags', 0)
3393 skipflags = state.get(b'skipflags', 0)
3330 if skipflags:
3394 if skipflags:
3331 skipflags &= self.flags(rev)
3395 skipflags &= self.flags(rev)
3332
3396
3333 _verify_revision(self, skipflags, state, node)
3397 _verify_revision(self, skipflags, state, node)
3334
3398
3335 l1 = self.rawsize(rev)
3399 l1 = self.rawsize(rev)
3336 l2 = len(self.rawdata(node))
3400 l2 = len(self.rawdata(node))
3337
3401
3338 if l1 != l2:
3402 if l1 != l2:
3339 yield revlogproblem(
3403 yield revlogproblem(
3340 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3404 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3341 node=node,
3405 node=node,
3342 )
3406 )
3343
3407
3344 except error.CensoredNodeError:
3408 except error.CensoredNodeError:
3345 if state[b'erroroncensored']:
3409 if state[b'erroroncensored']:
3346 yield revlogproblem(
3410 yield revlogproblem(
3347 error=_(b'censored file data'), node=node
3411 error=_(b'censored file data'), node=node
3348 )
3412 )
3349 state[b'skipread'].add(node)
3413 state[b'skipread'].add(node)
3350 except Exception as e:
3414 except Exception as e:
3351 yield revlogproblem(
3415 yield revlogproblem(
3352 error=_(b'unpacking %s: %s')
3416 error=_(b'unpacking %s: %s')
3353 % (short(node), stringutil.forcebytestr(e)),
3417 % (short(node), stringutil.forcebytestr(e)),
3354 node=node,
3418 node=node,
3355 )
3419 )
3356 state[b'skipread'].add(node)
3420 state[b'skipread'].add(node)
3357
3421
3358 def storageinfo(
3422 def storageinfo(
3359 self,
3423 self,
3360 exclusivefiles=False,
3424 exclusivefiles=False,
3361 sharedfiles=False,
3425 sharedfiles=False,
3362 revisionscount=False,
3426 revisionscount=False,
3363 trackedsize=False,
3427 trackedsize=False,
3364 storedsize=False,
3428 storedsize=False,
3365 ):
3429 ):
3366 d = {}
3430 d = {}
3367
3431
3368 if exclusivefiles:
3432 if exclusivefiles:
3369 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3433 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3370 if not self._inline:
3434 if not self._inline:
3371 d[b'exclusivefiles'].append((self.opener, self._datafile))
3435 d[b'exclusivefiles'].append((self.opener, self._datafile))
3372
3436
3373 if sharedfiles:
3437 if sharedfiles:
3374 d[b'sharedfiles'] = []
3438 d[b'sharedfiles'] = []
3375
3439
3376 if revisionscount:
3440 if revisionscount:
3377 d[b'revisionscount'] = len(self)
3441 d[b'revisionscount'] = len(self)
3378
3442
3379 if trackedsize:
3443 if trackedsize:
3380 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3444 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3381
3445
3382 if storedsize:
3446 if storedsize:
3383 d[b'storedsize'] = sum(
3447 d[b'storedsize'] = sum(
3384 self.opener.stat(path).st_size for path in self.files()
3448 self.opener.stat(path).st_size for path in self.files()
3385 )
3449 )
3386
3450
3387 return d
3451 return d
3388
3452
3389 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3453 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3390 if not self.hassidedata:
3454 if not self.hassidedata:
3391 return
3455 return
3392 # revlog formats with sidedata support does not support inline
3456 # revlog formats with sidedata support does not support inline
3393 assert not self._inline
3457 assert not self._inline
3394 if not helpers[1] and not helpers[2]:
3458 if not helpers[1] and not helpers[2]:
3395 # Nothing to generate or remove
3459 # Nothing to generate or remove
3396 return
3460 return
3397
3461
3398 new_entries = []
3462 new_entries = []
3399 # append the new sidedata
3463 # append the new sidedata
3400 with self._writing(transaction):
3464 with self._writing(transaction):
3401 ifh, dfh = self._writinghandles
3465 ifh, dfh, sdfh = self._writinghandles
3402 if self._docket is not None:
3466 dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
3403 dfh.seek(self._docket.data_end, os.SEEK_SET)
3467
3404 else:
3468 current_offset = sdfh.tell()
3405 dfh.seek(0, os.SEEK_END)
3406
3407 current_offset = dfh.tell()
3408 for rev in range(startrev, endrev + 1):
3469 for rev in range(startrev, endrev + 1):
3409 entry = self.index[rev]
3470 entry = self.index[rev]
3410 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3471 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3411 store=self,
3472 store=self,
3412 sidedata_helpers=helpers,
3473 sidedata_helpers=helpers,
3413 sidedata={},
3474 sidedata={},
3414 rev=rev,
3475 rev=rev,
3415 )
3476 )
3416
3477
3417 serialized_sidedata = sidedatautil.serialize_sidedata(
3478 serialized_sidedata = sidedatautil.serialize_sidedata(
3418 new_sidedata
3479 new_sidedata
3419 )
3480 )
3420
3481
3421 sidedata_compression_mode = COMP_MODE_INLINE
3482 sidedata_compression_mode = COMP_MODE_INLINE
3422 if serialized_sidedata and self.hassidedata:
3483 if serialized_sidedata and self.hassidedata:
3423 sidedata_compression_mode = COMP_MODE_PLAIN
3484 sidedata_compression_mode = COMP_MODE_PLAIN
3424 h, comp_sidedata = self.compress(serialized_sidedata)
3485 h, comp_sidedata = self.compress(serialized_sidedata)
3425 if (
3486 if (
3426 h != b'u'
3487 h != b'u'
3427 and comp_sidedata[0] != b'\0'
3488 and comp_sidedata[0] != b'\0'
3428 and len(comp_sidedata) < len(serialized_sidedata)
3489 and len(comp_sidedata) < len(serialized_sidedata)
3429 ):
3490 ):
3430 assert not h
3491 assert not h
3431 if (
3492 if (
3432 comp_sidedata[0]
3493 comp_sidedata[0]
3433 == self._docket.default_compression_header
3494 == self._docket.default_compression_header
3434 ):
3495 ):
3435 sidedata_compression_mode = COMP_MODE_DEFAULT
3496 sidedata_compression_mode = COMP_MODE_DEFAULT
3436 serialized_sidedata = comp_sidedata
3497 serialized_sidedata = comp_sidedata
3437 else:
3498 else:
3438 sidedata_compression_mode = COMP_MODE_INLINE
3499 sidedata_compression_mode = COMP_MODE_INLINE
3439 serialized_sidedata = comp_sidedata
3500 serialized_sidedata = comp_sidedata
3440 if entry[8] != 0 or entry[9] != 0:
3501 if entry[8] != 0 or entry[9] != 0:
3441 # rewriting entries that already have sidedata is not
3502 # rewriting entries that already have sidedata is not
3442 # supported yet, because it introduces garbage data in the
3503 # supported yet, because it introduces garbage data in the
3443 # revlog.
3504 # revlog.
3444 msg = b"rewriting existing sidedata is not supported yet"
3505 msg = b"rewriting existing sidedata is not supported yet"
3445 raise error.Abort(msg)
3506 raise error.Abort(msg)
3446
3507
3447 # Apply (potential) flags to add and to remove after running
3508 # Apply (potential) flags to add and to remove after running
3448 # the sidedata helpers
3509 # the sidedata helpers
3449 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3510 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3450 entry_update = (
3511 entry_update = (
3451 current_offset,
3512 current_offset,
3452 len(serialized_sidedata),
3513 len(serialized_sidedata),
3453 new_offset_flags,
3514 new_offset_flags,
3454 sidedata_compression_mode,
3515 sidedata_compression_mode,
3455 )
3516 )
3456
3517
3457 # the sidedata computation might have move the file cursors around
3518 # the sidedata computation might have move the file cursors around
3458 dfh.seek(current_offset, os.SEEK_SET)
3519 sdfh.seek(current_offset, os.SEEK_SET)
3459 dfh.write(serialized_sidedata)
3520 sdfh.write(serialized_sidedata)
3460 new_entries.append(entry_update)
3521 new_entries.append(entry_update)
3461 current_offset += len(serialized_sidedata)
3522 current_offset += len(serialized_sidedata)
3462 if self._docket is not None:
3523 self._docket.sidedata_end = sdfh.tell()
3463 self._docket.data_end = dfh.tell()
3464
3524
3465 # rewrite the new index entries
3525 # rewrite the new index entries
3466 ifh.seek(startrev * self.index.entry_size)
3526 ifh.seek(startrev * self.index.entry_size)
3467 for i, e in enumerate(new_entries):
3527 for i, e in enumerate(new_entries):
3468 rev = startrev + i
3528 rev = startrev + i
3469 self.index.replace_sidedata_info(rev, *e)
3529 self.index.replace_sidedata_info(rev, *e)
3470 packed = self.index.entry_binary(rev)
3530 packed = self.index.entry_binary(rev)
3471 if rev == 0 and self._docket is None:
3531 if rev == 0 and self._docket is None:
3472 header = self._format_flags | self._format_version
3532 header = self._format_flags | self._format_version
3473 header = self.index.pack_header(header)
3533 header = self.index.pack_header(header)
3474 packed = header + packed
3534 packed = header + packed
3475 ifh.write(packed)
3535 ifh.write(packed)
@@ -1,287 +1,333 b''
1 # docket - code related to revlog "docket"
1 # docket - code related to revlog "docket"
2 #
2 #
3 # Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 ### Revlog docket file
8 ### Revlog docket file
9 #
9 #
10 # The revlog is stored on disk using multiple files:
10 # The revlog is stored on disk using multiple files:
11 #
11 #
12 # * a small docket file, containing metadata and a pointer,
12 # * a small docket file, containing metadata and a pointer,
13 #
13 #
14 # * an index file, containing fixed width information about revisions,
14 # * an index file, containing fixed width information about revisions,
15 #
15 #
16 # * a data file, containing variable width data for these revisions,
16 # * a data file, containing variable width data for these revisions,
17
17
18 from __future__ import absolute_import
18 from __future__ import absolute_import
19
19
20 import errno
20 import errno
21 import os
21 import os
22 import random
22 import random
23 import struct
23 import struct
24
24
25 from .. import (
25 from .. import (
26 encoding,
26 encoding,
27 error,
27 error,
28 node,
28 node,
29 pycompat,
29 pycompat,
30 util,
30 util,
31 )
31 )
32
32
33 from . import (
33 from . import (
34 constants,
34 constants,
35 )
35 )
36
36
37
37
38 def make_uid(id_size=8):
38 def make_uid(id_size=8):
39 """return a new unique identifier.
39 """return a new unique identifier.
40
40
41 The identifier is random and composed of ascii characters."""
41 The identifier is random and composed of ascii characters."""
42 # size we "hex" the result we need half the number of bits to have a final
42 # size we "hex" the result we need half the number of bits to have a final
43 # uuid of size ID_SIZE
43 # uuid of size ID_SIZE
44 return node.hex(os.urandom(id_size // 2))
44 return node.hex(os.urandom(id_size // 2))
45
45
46
46
47 # some special test logic to avoid anoying random output in the test
47 # some special test logic to avoid anoying random output in the test
48 stable_docket_file = encoding.environ.get(b'HGTEST_UUIDFILE')
48 stable_docket_file = encoding.environ.get(b'HGTEST_UUIDFILE')
49
49
50 if stable_docket_file:
50 if stable_docket_file:
51
51
52 def make_uid(id_size=8):
52 def make_uid(id_size=8):
53 try:
53 try:
54 with open(stable_docket_file, mode='rb') as f:
54 with open(stable_docket_file, mode='rb') as f:
55 seed = f.read().strip()
55 seed = f.read().strip()
56 except IOError as inst:
56 except IOError as inst:
57 if inst.errno != errno.ENOENT:
57 if inst.errno != errno.ENOENT:
58 raise
58 raise
59 seed = b'04' # chosen by a fair dice roll. garanteed to be random
59 seed = b'04' # chosen by a fair dice roll. garanteed to be random
60 if pycompat.ispy3:
60 if pycompat.ispy3:
61 iter_seed = iter(seed)
61 iter_seed = iter(seed)
62 else:
62 else:
63 iter_seed = (ord(c) for c in seed)
63 iter_seed = (ord(c) for c in seed)
64 # some basic circular sum hashing on 64 bits
64 # some basic circular sum hashing on 64 bits
65 int_seed = 0
65 int_seed = 0
66 low_mask = int('1' * 35, 2)
66 low_mask = int('1' * 35, 2)
67 for i in iter_seed:
67 for i in iter_seed:
68 high_part = int_seed >> 35
68 high_part = int_seed >> 35
69 low_part = (int_seed & low_mask) << 28
69 low_part = (int_seed & low_mask) << 28
70 int_seed = high_part + low_part + i
70 int_seed = high_part + low_part + i
71 r = random.Random()
71 r = random.Random()
72 if pycompat.ispy3:
72 if pycompat.ispy3:
73 r.seed(int_seed, version=1)
73 r.seed(int_seed, version=1)
74 else:
74 else:
75 r.seed(int_seed)
75 r.seed(int_seed)
76 # once we drop python 3.8 support we can simply use r.randbytes
76 # once we drop python 3.8 support we can simply use r.randbytes
77 raw = r.getrandbits(id_size * 4)
77 raw = r.getrandbits(id_size * 4)
78 assert id_size == 8
78 assert id_size == 8
79 p = struct.pack('>L', raw)
79 p = struct.pack('>L', raw)
80 new = node.hex(p)
80 new = node.hex(p)
81 with open(stable_docket_file, 'wb') as f:
81 with open(stable_docket_file, 'wb') as f:
82 f.write(new)
82 f.write(new)
83 return new
83 return new
84
84
85
85
86 # Docket format
86 # Docket format
87 #
87 #
88 # * 4 bytes: revlog version
88 # * 4 bytes: revlog version
89 # | This is mandatory as docket must be compatible with the previous
89 # | This is mandatory as docket must be compatible with the previous
90 # | revlog index header.
90 # | revlog index header.
91 # * 1 bytes: size of index uuid
91 # * 1 bytes: size of index uuid
92 # * 1 bytes: size of data uuid
92 # * 1 bytes: size of data uuid
93 # * 1 bytes: size of sizedata uuid
93 # * 8 bytes: size of index-data
94 # * 8 bytes: size of index-data
94 # * 8 bytes: pending size of index-data
95 # * 8 bytes: pending size of index-data
95 # * 8 bytes: size of data
96 # * 8 bytes: size of data
97 # * 8 bytes: size of sidedata
96 # * 8 bytes: pending size of data
98 # * 8 bytes: pending size of data
99 # * 8 bytes: pending size of sidedata
97 # * 1 bytes: default compression header
100 # * 1 bytes: default compression header
98 S_HEADER = struct.Struct(constants.INDEX_HEADER_FMT + b'BBLLLLc')
101 S_HEADER = struct.Struct(constants.INDEX_HEADER_FMT + b'BBBLLLLLLc')
99
102
100
103
101 class RevlogDocket(object):
104 class RevlogDocket(object):
102 """metadata associated with revlog"""
105 """metadata associated with revlog"""
103
106
104 def __init__(
107 def __init__(
105 self,
108 self,
106 revlog,
109 revlog,
107 use_pending=False,
110 use_pending=False,
108 version_header=None,
111 version_header=None,
109 index_uuid=None,
112 index_uuid=None,
110 data_uuid=None,
113 data_uuid=None,
114 sidedata_uuid=None,
111 index_end=0,
115 index_end=0,
112 pending_index_end=0,
116 pending_index_end=0,
113 data_end=0,
117 data_end=0,
114 pending_data_end=0,
118 pending_data_end=0,
119 sidedata_end=0,
120 pending_sidedata_end=0,
115 default_compression_header=None,
121 default_compression_header=None,
116 ):
122 ):
117 self._version_header = version_header
123 self._version_header = version_header
118 self._read_only = bool(use_pending)
124 self._read_only = bool(use_pending)
119 self._dirty = False
125 self._dirty = False
120 self._radix = revlog.radix
126 self._radix = revlog.radix
121 self._path = revlog._docket_file
127 self._path = revlog._docket_file
122 self._opener = revlog.opener
128 self._opener = revlog.opener
123 self._index_uuid = index_uuid
129 self._index_uuid = index_uuid
124 self._data_uuid = data_uuid
130 self._data_uuid = data_uuid
131 self._sidedata_uuid = sidedata_uuid
125 # thes asserts should be True as long as we have a single index filename
132 # thes asserts should be True as long as we have a single index filename
126 assert index_end <= pending_index_end
133 assert index_end <= pending_index_end
127 assert data_end <= pending_data_end
134 assert data_end <= pending_data_end
135 assert sidedata_end <= pending_sidedata_end
128 self._initial_index_end = index_end
136 self._initial_index_end = index_end
129 self._pending_index_end = pending_index_end
137 self._pending_index_end = pending_index_end
130 self._initial_data_end = data_end
138 self._initial_data_end = data_end
131 self._pending_data_end = pending_data_end
139 self._pending_data_end = pending_data_end
140 self._initial_sidedata_end = sidedata_end
141 self._pending_sidedata_end = pending_sidedata_end
132 if use_pending:
142 if use_pending:
133 self._index_end = self._pending_index_end
143 self._index_end = self._pending_index_end
134 self._data_end = self._pending_data_end
144 self._data_end = self._pending_data_end
145 self._sidedata_end = self._pending_sidedata_end
135 else:
146 else:
136 self._index_end = self._initial_index_end
147 self._index_end = self._initial_index_end
137 self._data_end = self._initial_data_end
148 self._data_end = self._initial_data_end
149 self._sidedata_end = self._initial_sidedata_end
138 self.default_compression_header = default_compression_header
150 self.default_compression_header = default_compression_header
139
151
140 def index_filepath(self):
152 def index_filepath(self):
141 """file path to the current index file associated to this docket"""
153 """file path to the current index file associated to this docket"""
142 # very simplistic version at first
154 # very simplistic version at first
143 if self._index_uuid is None:
155 if self._index_uuid is None:
144 self._index_uuid = make_uid()
156 self._index_uuid = make_uid()
145 return b"%s-%s.idx" % (self._radix, self._index_uuid)
157 return b"%s-%s.idx" % (self._radix, self._index_uuid)
146
158
147 def data_filepath(self):
159 def data_filepath(self):
148 """file path to the current data file associated to this docket"""
160 """file path to the current data file associated to this docket"""
149 # very simplistic version at first
161 # very simplistic version at first
150 if self._data_uuid is None:
162 if self._data_uuid is None:
151 self._data_uuid = make_uid()
163 self._data_uuid = make_uid()
152 return b"%s-%s.dat" % (self._radix, self._data_uuid)
164 return b"%s-%s.dat" % (self._radix, self._data_uuid)
153
165
166 def sidedata_filepath(self):
167 """file path to the current sidedata file associated to this docket"""
168 # very simplistic version at first
169 if self._sidedata_uuid is None:
170 self._sidedata_uuid = make_uid()
171 return b"%s-%s.sda" % (self._radix, self._sidedata_uuid)
172
154 @property
173 @property
155 def index_end(self):
174 def index_end(self):
156 return self._index_end
175 return self._index_end
157
176
158 @index_end.setter
177 @index_end.setter
159 def index_end(self, new_size):
178 def index_end(self, new_size):
160 if new_size != self._index_end:
179 if new_size != self._index_end:
161 self._index_end = new_size
180 self._index_end = new_size
162 self._dirty = True
181 self._dirty = True
163
182
164 @property
183 @property
165 def data_end(self):
184 def data_end(self):
166 return self._data_end
185 return self._data_end
167
186
168 @data_end.setter
187 @data_end.setter
169 def data_end(self, new_size):
188 def data_end(self, new_size):
170 if new_size != self._data_end:
189 if new_size != self._data_end:
171 self._data_end = new_size
190 self._data_end = new_size
172 self._dirty = True
191 self._dirty = True
173
192
193 @property
194 def sidedata_end(self):
195 return self._sidedata_end
196
197 @sidedata_end.setter
198 def sidedata_end(self, new_size):
199 if new_size != self._sidedata_end:
200 self._sidedata_end = new_size
201 self._dirty = True
202
174 def write(self, transaction, pending=False, stripping=False):
203 def write(self, transaction, pending=False, stripping=False):
175 """write the modification of disk if any
204 """write the modification of disk if any
176
205
177 This make the new content visible to all process"""
206 This make the new content visible to all process"""
178 if not self._dirty:
207 if not self._dirty:
179 return False
208 return False
180 else:
209 else:
181 if self._read_only:
210 if self._read_only:
182 msg = b'writing read-only docket: %s'
211 msg = b'writing read-only docket: %s'
183 msg %= self._path
212 msg %= self._path
184 raise error.ProgrammingError(msg)
213 raise error.ProgrammingError(msg)
185 if not stripping:
214 if not stripping:
186 # XXX we could, leverage the docket while stripping. However it
215 # XXX we could, leverage the docket while stripping. However it
187 # is not powerfull enough at the time of this comment
216 # is not powerfull enough at the time of this comment
188 transaction.addbackup(self._path, location=b'store')
217 transaction.addbackup(self._path, location=b'store')
189 with self._opener(self._path, mode=b'w', atomictemp=True) as f:
218 with self._opener(self._path, mode=b'w', atomictemp=True) as f:
190 f.write(self._serialize(pending=pending))
219 f.write(self._serialize(pending=pending))
191 # if pending we still need to the write final data eventually
220 # if pending we still need to the write final data eventually
192 self._dirty = pending
221 self._dirty = pending
193 return True
222 return True
194
223
195 def _serialize(self, pending=False):
224 def _serialize(self, pending=False):
196 if pending:
225 if pending:
197 official_index_end = self._initial_index_end
226 official_index_end = self._initial_index_end
198 official_data_end = self._initial_data_end
227 official_data_end = self._initial_data_end
228 official_sidedata_end = self._initial_sidedata_end
199 else:
229 else:
200 official_index_end = self._index_end
230 official_index_end = self._index_end
201 official_data_end = self._data_end
231 official_data_end = self._data_end
232 official_sidedata_end = self._sidedata_end
202
233
203 # this assert should be True as long as we have a single index filename
234 # this assert should be True as long as we have a single index filename
204 assert official_data_end <= self._data_end
235 assert official_data_end <= self._data_end
236 assert official_sidedata_end <= self._sidedata_end
205 data = (
237 data = (
206 self._version_header,
238 self._version_header,
207 len(self._index_uuid),
239 len(self._index_uuid),
208 len(self._data_uuid),
240 len(self._data_uuid),
241 len(self._sidedata_uuid),
209 official_index_end,
242 official_index_end,
210 self._index_end,
243 self._index_end,
211 official_data_end,
244 official_data_end,
212 self._data_end,
245 self._data_end,
246 official_sidedata_end,
247 self._sidedata_end,
213 self.default_compression_header,
248 self.default_compression_header,
214 )
249 )
215 s = []
250 s = []
216 s.append(S_HEADER.pack(*data))
251 s.append(S_HEADER.pack(*data))
217 s.append(self._index_uuid)
252 s.append(self._index_uuid)
218 s.append(self._data_uuid)
253 s.append(self._data_uuid)
254 s.append(self._sidedata_uuid)
219 return b''.join(s)
255 return b''.join(s)
220
256
221
257
222 def default_docket(revlog, version_header):
258 def default_docket(revlog, version_header):
223 """given a revlog version a new docket object for the given revlog"""
259 """given a revlog version a new docket object for the given revlog"""
224 rl_version = version_header & 0xFFFF
260 rl_version = version_header & 0xFFFF
225 if rl_version not in (constants.REVLOGV2, constants.CHANGELOGV2):
261 if rl_version not in (constants.REVLOGV2, constants.CHANGELOGV2):
226 return None
262 return None
227 comp = util.compengines[revlog._compengine].revlogheader()
263 comp = util.compengines[revlog._compengine].revlogheader()
228 docket = RevlogDocket(
264 docket = RevlogDocket(
229 revlog,
265 revlog,
230 version_header=version_header,
266 version_header=version_header,
231 default_compression_header=comp,
267 default_compression_header=comp,
232 )
268 )
233 docket._dirty = True
269 docket._dirty = True
234 return docket
270 return docket
235
271
236
272
237 def parse_docket(revlog, data, use_pending=False):
273 def parse_docket(revlog, data, use_pending=False):
238 """given some docket data return a docket object for the given revlog"""
274 """given some docket data return a docket object for the given revlog"""
239 header = S_HEADER.unpack(data[: S_HEADER.size])
275 header = S_HEADER.unpack(data[: S_HEADER.size])
240
276
241 # this is a mutable closure capture used in `get_data`
277 # this is a mutable closure capture used in `get_data`
242 offset = [S_HEADER.size]
278 offset = [S_HEADER.size]
243
279
244 def get_data(size):
280 def get_data(size):
245 """utility closure to access the `size` next bytes"""
281 """utility closure to access the `size` next bytes"""
246 if offset[0] + size > len(data):
282 if offset[0] + size > len(data):
247 # XXX better class
283 # XXX better class
248 msg = b"docket is too short, expected %d got %d"
284 msg = b"docket is too short, expected %d got %d"
249 msg %= (offset[0] + size, len(data))
285 msg %= (offset[0] + size, len(data))
250 raise error.Abort(msg)
286 raise error.Abort(msg)
251 raw = data[offset[0] : offset[0] + size]
287 raw = data[offset[0] : offset[0] + size]
252 offset[0] += size
288 offset[0] += size
253 return raw
289 return raw
254
290
255 iheader = iter(header)
291 iheader = iter(header)
256
292
257 version_header = next(iheader)
293 version_header = next(iheader)
258
294
259 index_uuid_size = next(iheader)
295 index_uuid_size = next(iheader)
260 index_uuid = get_data(index_uuid_size)
296 index_uuid = get_data(index_uuid_size)
261
297
262 data_uuid_size = next(iheader)
298 data_uuid_size = next(iheader)
263 data_uuid = get_data(data_uuid_size)
299 data_uuid = get_data(data_uuid_size)
264
300
301 sidedata_uuid_size = next(iheader)
302 sidedata_uuid = get_data(sidedata_uuid_size)
303
265 index_size = next(iheader)
304 index_size = next(iheader)
266
305
267 pending_index_size = next(iheader)
306 pending_index_size = next(iheader)
268
307
269 data_size = next(iheader)
308 data_size = next(iheader)
270
309
271 pending_data_size = next(iheader)
310 pending_data_size = next(iheader)
272
311
312 sidedata_size = next(iheader)
313
314 pending_sidedata_size = next(iheader)
315
273 default_compression_header = next(iheader)
316 default_compression_header = next(iheader)
274
317
275 docket = RevlogDocket(
318 docket = RevlogDocket(
276 revlog,
319 revlog,
277 use_pending=use_pending,
320 use_pending=use_pending,
278 version_header=version_header,
321 version_header=version_header,
279 index_uuid=index_uuid,
322 index_uuid=index_uuid,
280 data_uuid=data_uuid,
323 data_uuid=data_uuid,
324 sidedata_uuid=sidedata_uuid,
281 index_end=index_size,
325 index_end=index_size,
282 pending_index_end=pending_index_size,
326 pending_index_end=pending_index_size,
283 data_end=data_size,
327 data_end=data_size,
284 pending_data_end=pending_data_size,
328 pending_data_end=pending_data_size,
329 sidedata_end=sidedata_size,
330 pending_sidedata_end=pending_sidedata_size,
285 default_compression_header=default_compression_header,
331 default_compression_header=default_compression_header,
286 )
332 )
287 return docket
333 return docket
@@ -1,823 +1,824 b''
1 # store.py - repository store handling for Mercurial
1 # store.py - repository store handling for Mercurial
2 #
2 #
3 # Copyright 2008 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2008 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import functools
11 import functools
12 import os
12 import os
13 import re
13 import re
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import getattr
17 from .pycompat import getattr
18 from .node import hex
18 from .node import hex
19 from . import (
19 from . import (
20 changelog,
20 changelog,
21 error,
21 error,
22 manifest,
22 manifest,
23 policy,
23 policy,
24 pycompat,
24 pycompat,
25 util,
25 util,
26 vfs as vfsmod,
26 vfs as vfsmod,
27 )
27 )
28 from .utils import hashutil
28 from .utils import hashutil
29
29
30 parsers = policy.importmod('parsers')
30 parsers = policy.importmod('parsers')
31 # how much bytes should be read from fncache in one read
31 # how much bytes should be read from fncache in one read
32 # It is done to prevent loading large fncache files into memory
32 # It is done to prevent loading large fncache files into memory
33 fncache_chunksize = 10 ** 6
33 fncache_chunksize = 10 ** 6
34
34
35
35
36 def _matchtrackedpath(path, matcher):
36 def _matchtrackedpath(path, matcher):
37 """parses a fncache entry and returns whether the entry is tracking a path
37 """parses a fncache entry and returns whether the entry is tracking a path
38 matched by matcher or not.
38 matched by matcher or not.
39
39
40 If matcher is None, returns True"""
40 If matcher is None, returns True"""
41
41
42 if matcher is None:
42 if matcher is None:
43 return True
43 return True
44 path = decodedir(path)
44 path = decodedir(path)
45 if path.startswith(b'data/'):
45 if path.startswith(b'data/'):
46 return matcher(path[len(b'data/') : -len(b'.i')])
46 return matcher(path[len(b'data/') : -len(b'.i')])
47 elif path.startswith(b'meta/'):
47 elif path.startswith(b'meta/'):
48 return matcher.visitdir(path[len(b'meta/') : -len(b'/00manifest.i')])
48 return matcher.visitdir(path[len(b'meta/') : -len(b'/00manifest.i')])
49
49
50 raise error.ProgrammingError(b"cannot decode path %s" % path)
50 raise error.ProgrammingError(b"cannot decode path %s" % path)
51
51
52
52
53 # This avoids a collision between a file named foo and a dir named
53 # This avoids a collision between a file named foo and a dir named
54 # foo.i or foo.d
54 # foo.i or foo.d
55 def _encodedir(path):
55 def _encodedir(path):
56 """
56 """
57 >>> _encodedir(b'data/foo.i')
57 >>> _encodedir(b'data/foo.i')
58 'data/foo.i'
58 'data/foo.i'
59 >>> _encodedir(b'data/foo.i/bla.i')
59 >>> _encodedir(b'data/foo.i/bla.i')
60 'data/foo.i.hg/bla.i'
60 'data/foo.i.hg/bla.i'
61 >>> _encodedir(b'data/foo.i.hg/bla.i')
61 >>> _encodedir(b'data/foo.i.hg/bla.i')
62 'data/foo.i.hg.hg/bla.i'
62 'data/foo.i.hg.hg/bla.i'
63 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
63 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
64 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
64 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
65 """
65 """
66 return (
66 return (
67 path.replace(b".hg/", b".hg.hg/")
67 path.replace(b".hg/", b".hg.hg/")
68 .replace(b".i/", b".i.hg/")
68 .replace(b".i/", b".i.hg/")
69 .replace(b".d/", b".d.hg/")
69 .replace(b".d/", b".d.hg/")
70 )
70 )
71
71
72
72
73 encodedir = getattr(parsers, 'encodedir', _encodedir)
73 encodedir = getattr(parsers, 'encodedir', _encodedir)
74
74
75
75
76 def decodedir(path):
76 def decodedir(path):
77 """
77 """
78 >>> decodedir(b'data/foo.i')
78 >>> decodedir(b'data/foo.i')
79 'data/foo.i'
79 'data/foo.i'
80 >>> decodedir(b'data/foo.i.hg/bla.i')
80 >>> decodedir(b'data/foo.i.hg/bla.i')
81 'data/foo.i/bla.i'
81 'data/foo.i/bla.i'
82 >>> decodedir(b'data/foo.i.hg.hg/bla.i')
82 >>> decodedir(b'data/foo.i.hg.hg/bla.i')
83 'data/foo.i.hg/bla.i'
83 'data/foo.i.hg/bla.i'
84 """
84 """
85 if b".hg/" not in path:
85 if b".hg/" not in path:
86 return path
86 return path
87 return (
87 return (
88 path.replace(b".d.hg/", b".d/")
88 path.replace(b".d.hg/", b".d/")
89 .replace(b".i.hg/", b".i/")
89 .replace(b".i.hg/", b".i/")
90 .replace(b".hg.hg/", b".hg/")
90 .replace(b".hg.hg/", b".hg/")
91 )
91 )
92
92
93
93
94 def _reserved():
94 def _reserved():
95 """characters that are problematic for filesystems
95 """characters that are problematic for filesystems
96
96
97 * ascii escapes (0..31)
97 * ascii escapes (0..31)
98 * ascii hi (126..255)
98 * ascii hi (126..255)
99 * windows specials
99 * windows specials
100
100
101 these characters will be escaped by encodefunctions
101 these characters will be escaped by encodefunctions
102 """
102 """
103 winreserved = [ord(x) for x in u'\\:*?"<>|']
103 winreserved = [ord(x) for x in u'\\:*?"<>|']
104 for x in range(32):
104 for x in range(32):
105 yield x
105 yield x
106 for x in range(126, 256):
106 for x in range(126, 256):
107 yield x
107 yield x
108 for x in winreserved:
108 for x in winreserved:
109 yield x
109 yield x
110
110
111
111
112 def _buildencodefun():
112 def _buildencodefun():
113 """
113 """
114 >>> enc, dec = _buildencodefun()
114 >>> enc, dec = _buildencodefun()
115
115
116 >>> enc(b'nothing/special.txt')
116 >>> enc(b'nothing/special.txt')
117 'nothing/special.txt'
117 'nothing/special.txt'
118 >>> dec(b'nothing/special.txt')
118 >>> dec(b'nothing/special.txt')
119 'nothing/special.txt'
119 'nothing/special.txt'
120
120
121 >>> enc(b'HELLO')
121 >>> enc(b'HELLO')
122 '_h_e_l_l_o'
122 '_h_e_l_l_o'
123 >>> dec(b'_h_e_l_l_o')
123 >>> dec(b'_h_e_l_l_o')
124 'HELLO'
124 'HELLO'
125
125
126 >>> enc(b'hello:world?')
126 >>> enc(b'hello:world?')
127 'hello~3aworld~3f'
127 'hello~3aworld~3f'
128 >>> dec(b'hello~3aworld~3f')
128 >>> dec(b'hello~3aworld~3f')
129 'hello:world?'
129 'hello:world?'
130
130
131 >>> enc(b'the\\x07quick\\xADshot')
131 >>> enc(b'the\\x07quick\\xADshot')
132 'the~07quick~adshot'
132 'the~07quick~adshot'
133 >>> dec(b'the~07quick~adshot')
133 >>> dec(b'the~07quick~adshot')
134 'the\\x07quick\\xadshot'
134 'the\\x07quick\\xadshot'
135 """
135 """
136 e = b'_'
136 e = b'_'
137 xchr = pycompat.bytechr
137 xchr = pycompat.bytechr
138 asciistr = list(map(xchr, range(127)))
138 asciistr = list(map(xchr, range(127)))
139 capitals = list(range(ord(b"A"), ord(b"Z") + 1))
139 capitals = list(range(ord(b"A"), ord(b"Z") + 1))
140
140
141 cmap = {x: x for x in asciistr}
141 cmap = {x: x for x in asciistr}
142 for x in _reserved():
142 for x in _reserved():
143 cmap[xchr(x)] = b"~%02x" % x
143 cmap[xchr(x)] = b"~%02x" % x
144 for x in capitals + [ord(e)]:
144 for x in capitals + [ord(e)]:
145 cmap[xchr(x)] = e + xchr(x).lower()
145 cmap[xchr(x)] = e + xchr(x).lower()
146
146
147 dmap = {}
147 dmap = {}
148 for k, v in pycompat.iteritems(cmap):
148 for k, v in pycompat.iteritems(cmap):
149 dmap[v] = k
149 dmap[v] = k
150
150
151 def decode(s):
151 def decode(s):
152 i = 0
152 i = 0
153 while i < len(s):
153 while i < len(s):
154 for l in pycompat.xrange(1, 4):
154 for l in pycompat.xrange(1, 4):
155 try:
155 try:
156 yield dmap[s[i : i + l]]
156 yield dmap[s[i : i + l]]
157 i += l
157 i += l
158 break
158 break
159 except KeyError:
159 except KeyError:
160 pass
160 pass
161 else:
161 else:
162 raise KeyError
162 raise KeyError
163
163
164 return (
164 return (
165 lambda s: b''.join(
165 lambda s: b''.join(
166 [cmap[s[c : c + 1]] for c in pycompat.xrange(len(s))]
166 [cmap[s[c : c + 1]] for c in pycompat.xrange(len(s))]
167 ),
167 ),
168 lambda s: b''.join(list(decode(s))),
168 lambda s: b''.join(list(decode(s))),
169 )
169 )
170
170
171
171
172 _encodefname, _decodefname = _buildencodefun()
172 _encodefname, _decodefname = _buildencodefun()
173
173
174
174
175 def encodefilename(s):
175 def encodefilename(s):
176 """
176 """
177 >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
177 >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
178 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
178 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
179 """
179 """
180 return _encodefname(encodedir(s))
180 return _encodefname(encodedir(s))
181
181
182
182
def decodefilename(s):
    """Decode an on-disk store path back to the repository-relative form.

    >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
    'foo.i/bar.d/bla.hg/hi:world?/HELLO'
    """
    # undo the per-character encoding first, then the directory-name encoding
    fnamedecoded = _decodefname(s)
    return decodedir(fnamedecoded)
189
189
190
190
def _buildlowerencodefun():
    """Build a non-reversible lowercasing encoder for store paths.

    >>> f = _buildlowerencodefun()
    >>> f(b'nothing/special.txt')
    'nothing/special.txt'
    >>> f(b'HELLO')
    'hello'
    >>> f(b'hello:world?')
    'hello~3aworld~3f'
    >>> f(b'the\\x07quick\\xADshot')
    'the~07quick~adshot'
    """
    xchr = pycompat.bytechr
    # start from the identity mapping for 7-bit bytes ...
    cmap = {}
    for x in pycompat.xrange(127):
        cmap[xchr(x)] = xchr(x)
    # ... then escape reserved bytes as '~xx' ...
    for x in _reserved():
        cmap[xchr(x)] = b"~%02x" % x
    # ... and fold uppercase ASCII to lowercase (not reversible)
    for x in range(ord(b"A"), ord(b"Z") + 1):
        cmap[xchr(x)] = xchr(x).lower()

    def lowerencode(s):
        return b"".join(map(cmap.__getitem__, pycompat.iterbytestr(s)))

    return lowerencode
214
214
215
215
# prefer the C implementation from parsers when available
lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
217
217
218 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
218 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
219 _winres3 = (b'aux', b'con', b'prn', b'nul') # length 3
219 _winres3 = (b'aux', b'con', b'prn', b'nul') # length 3
220 _winres4 = (b'com', b'lpt') # length 4 (with trailing 1..9)
220 _winres4 = (b'com', b'lpt') # length 4 (with trailing 1..9)
221
221
222
222
223 def _auxencode(path, dotencode):
223 def _auxencode(path, dotencode):
224 """
224 """
225 Encodes filenames containing names reserved by Windows or which end in
225 Encodes filenames containing names reserved by Windows or which end in
226 period or space. Does not touch other single reserved characters c.
226 period or space. Does not touch other single reserved characters c.
227 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
227 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
228 Additionally encodes space or period at the beginning, if dotencode is
228 Additionally encodes space or period at the beginning, if dotencode is
229 True. Parameter path is assumed to be all lowercase.
229 True. Parameter path is assumed to be all lowercase.
230 A segment only needs encoding if a reserved name appears as a
230 A segment only needs encoding if a reserved name appears as a
231 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
231 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
232 doesn't need encoding.
232 doesn't need encoding.
233
233
234 >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
234 >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
235 >>> _auxencode(s.split(b'/'), True)
235 >>> _auxencode(s.split(b'/'), True)
236 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
236 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
237 >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
237 >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
238 >>> _auxencode(s.split(b'/'), False)
238 >>> _auxencode(s.split(b'/'), False)
239 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
239 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
240 >>> _auxencode([b'foo. '], True)
240 >>> _auxencode([b'foo. '], True)
241 ['foo.~20']
241 ['foo.~20']
242 >>> _auxencode([b' .foo'], True)
242 >>> _auxencode([b' .foo'], True)
243 ['~20.foo']
243 ['~20.foo']
244 """
244 """
245 for i, n in enumerate(path):
245 for i, n in enumerate(path):
246 if not n:
246 if not n:
247 continue
247 continue
248 if dotencode and n[0] in b'. ':
248 if dotencode and n[0] in b'. ':
249 n = b"~%02x" % ord(n[0:1]) + n[1:]
249 n = b"~%02x" % ord(n[0:1]) + n[1:]
250 path[i] = n
250 path[i] = n
251 else:
251 else:
252 l = n.find(b'.')
252 l = n.find(b'.')
253 if l == -1:
253 if l == -1:
254 l = len(n)
254 l = len(n)
255 if (l == 3 and n[:3] in _winres3) or (
255 if (l == 3 and n[:3] in _winres3) or (
256 l == 4
256 l == 4
257 and n[3:4] <= b'9'
257 and n[3:4] <= b'9'
258 and n[3:4] >= b'1'
258 and n[3:4] >= b'1'
259 and n[:3] in _winres4
259 and n[:3] in _winres4
260 ):
260 ):
261 # encode third letter ('aux' -> 'au~78')
261 # encode third letter ('aux' -> 'au~78')
262 ec = b"~%02x" % ord(n[2:3])
262 ec = b"~%02x" % ord(n[2:3])
263 n = n[0:2] + ec + n[3:]
263 n = n[0:2] + ec + n[3:]
264 path[i] = n
264 path[i] = n
265 if n[-1] in b'. ':
265 if n[-1] in b'. ':
266 # encode last period or space ('foo...' -> 'foo..~2e')
266 # encode last period or space ('foo...' -> 'foo..~2e')
267 path[i] = n[:-1] + b"~%02x" % ord(n[-1:])
267 path[i] = n[:-1] + b"~%02x" % ord(n[-1:])
268 return path
268 return path
269
269
270
270
# encoded store paths longer than this fall back to hashed encoding
_maxstorepathlen = 120
# characters kept from each directory level when building a hashed path
_dirprefixlen = 8
# total length budget for the shortened directory part of a hashed path
_maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
274
274
275
275
def _hashencode(path, dotencode):
    """Return the non-reversible hashed encoding of *path*.

    The result is 'dh/' + shortened directory prefixes + as much of the
    basename as fits + sha1 digest of the full path + original extension,
    kept within _maxstorepathlen characters (see _hybridencode docstring).
    """
    digest = hex(hashutil.sha1(path).digest())
    # path[5:] skips the leading 'data/' or 'meta/' prefix
    segments = _auxencode(lowerencode(path[5:]).split(b'/'), dotencode)
    basename = segments[-1]
    _root, ext = os.path.splitext(basename)
    shortdirs = []
    used = 0
    for segment in segments[:-1]:
        prefix = segment[:_dirprefixlen]
        if prefix[-1] in b'. ':
            # Windows can't access dirs ending in period or space
            prefix = prefix[:-1] + b'_'
        # +1 accounts for the '/' separator between directory prefixes
        if not shortdirs:
            needed = len(prefix)
        else:
            needed = used + 1 + len(prefix)
        if needed > _maxshortdirslen:
            break
        shortdirs.append(prefix)
        used = needed
    dirs = b'/'.join(shortdirs)
    if dirs:
        dirs += b'/'
    res = b'dh/' + dirs + digest + ext
    spaceleft = _maxstorepathlen - len(res)
    if spaceleft > 0:
        # pad with the start of the basename up to the length budget
        res = b'dh/' + dirs + basename[:spaceleft] + digest + ext
    return res
306
306
307
307
def _hybridencode(path, dotencode):
    """encodes path with a length limit

    Encodes all paths that begin with 'data/', according to the following.

    Default encoding (reversible):

    Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
    characters are encoded as '~xx', where xx is the two digit hex code
    of the character (see encodefilename).
    Relevant path components consisting of Windows reserved filenames are
    masked by encoding the third character ('aux' -> 'au~78', see _auxencode).

    Hashed encoding (not reversible):

    If the default-encoded path is longer than _maxstorepathlen, a
    non-reversible hybrid hashing of the path is done instead.
    This encoding uses up to _dirprefixlen characters of all directory
    levels of the lowerencoded path, but not more levels than can fit into
    _maxshortdirslen.
    Then follows the filler followed by the sha digest of the full path.
    The filler is the beginning of the basename of the lowerencoded path
    (the basename is everything after the last path separator). The filler
    is as long as possible, filling in characters from the basename until
    the encoded path has _maxstorepathlen characters (or all chars of the
    basename have been taken).
    The extension (e.g. '.i' or '.d') is preserved.

    The string 'data/' at the beginning is replaced with 'dh/', if the hashed
    encoding was used.
    """
    path = encodedir(path)
    segments = _encodefname(path).split(b'/')
    res = b'/'.join(_auxencode(segments, dotencode))
    if len(res) > _maxstorepathlen:
        # too long for the reversible scheme: fall back to hashing
        res = _hashencode(path, dotencode)
    return res
345
345
346
346
def _pathencode(path):
    """Pure-Python dot-encoding variant of _hybridencode.

    Same scheme as _hybridencode(path, True), with an extra shortcut:
    if even the raw path exceeds _maxstorepathlen, hash immediately
    without attempting the reversible encoding first.
    """
    direncoded = encodedir(path)
    if len(path) > _maxstorepathlen:
        return _hashencode(direncoded, True)
    segments = _encodefname(direncoded).split(b'/')
    res = b'/'.join(_auxencode(segments, True))
    if len(res) > _maxstorepathlen:
        return _hashencode(direncoded, True)
    return res
356
356
357
357
# prefer the C implementation from parsers when available
_pathencode = getattr(parsers, 'pathencode', _pathencode)
359
359
360
360
def _plainhybridencode(f):
    """hybrid-encode *f* without dot-encoding (dotencode=False)."""
    return _hybridencode(f, False)
363
363
364
364
def _calcmode(vfs):
    """Return the mode to create files with under *vfs*, or None.

    None means "use the process default", either because stat failed or
    because the default already matches the directory's mode (avoiding
    useless chmod calls).
    """
    try:
        # files in .hg/ will be created using this mode
        mode = vfs.stat().st_mode
    except OSError:
        return None
    # avoid some useless chmods
    if (0o777 & ~util.umask) == (0o777 & mode):
        return None
    return mode
375
375
376
376
# store files and directories reported by basicstore.copylist()
_data = [
    b'bookmarks',
    b'narrowspec',
    b'data',
    b'meta',
    b'00manifest.d',
    b'00manifest.i',
    b'00changelog.d',
    b'00changelog.i',
    b'phaseroots',
    b'obsstore',
    b'requires',
]
390
390
# suffixes of revlog entry points ('i.tmpcensored' is a '.i' file being
# rewritten during censoring, hence no leading dot)
REVLOG_FILES_MAIN_EXT = (b'.i', b'i.tmpcensored')
# suffixes of secondary revlog files (data, raw data, nodemap docket and
# data, sidedata, and in-progress censored data)
REVLOG_FILES_OTHER_EXT = (
    b'.idx',
    b'.d',
    b'.dat',
    b'.n',
    b'.nd',
    b'.sda',
    b'd.tmpcensored',
)
# files that are "volatile" and might change between listing and streaming
#
# note: the ".nd" file are nodemap data and won't "change" but they might be
# deleted.
REVLOG_FILES_VOLATILE_EXT = (b'.n', b'.nd')

# some exception to the above matching
#
# fixed: use a raw bytes literal -- '\.' is an invalid escape sequence in a
# non-raw literal (DeprecationWarning, a future SyntaxError)
EXCLUDED = re.compile(rb'.*undo\.[^/]+\.(nd?|i)$')
408
409
409
410
def is_revlog(f, kind, st):
    """Return the revlog type flags for regular file *f*, or None.

    Non-regular directory entries (directories, symlinks, ...) are never
    revlogs.
    """
    if kind == stat.S_IFREG:
        return revlog_type(f)
    return None
414
415
415
416
def revlog_type(f):
    """Classify store filename *f* into FILEFLAGS_* bits, or None.

    Returns FILEFLAGS_REVLOG_MAIN for revlog entry points,
    FILETYPE_FILELOG_OTHER (possibly with FILEFLAGS_VOLATILE) for
    secondary revlog files, and None for anything else.
    """
    # explicitly excluded names (undo backups) are never revlogs
    if EXCLUDED.match(f) is not None:
        return None
    if f.endswith(REVLOG_FILES_MAIN_EXT):
        return FILEFLAGS_REVLOG_MAIN
    if f.endswith(REVLOG_FILES_OTHER_EXT):
        ftype = FILETYPE_FILELOG_OTHER
        if f.endswith(REVLOG_FILES_VOLATILE_EXT):
            ftype |= FILEFLAGS_VOLATILE
        return ftype
    return None
425
426
426
427
# bit flags describing store files; a file type is a category flag
# (changelog / manifest / filelog / other) OR-ed with a role flag
# (main revlog entry point vs. secondary file)

# the file is part of changelog data
FILEFLAGS_CHANGELOG = 1 << 13
# the file is part of manifest data
FILEFLAGS_MANIFESTLOG = 1 << 12
# the file is part of filelog data
FILEFLAGS_FILELOG = 1 << 11
# file that are not directly part of a revlog
FILEFLAGS_OTHER = 1 << 10

# the main entry point for a revlog
FILEFLAGS_REVLOG_MAIN = 1 << 1
# a secondary file for a revlog
FILEFLAGS_REVLOG_OTHER = 1 << 0

# files that are "volatile" and might change between listing and streaming
FILEFLAGS_VOLATILE = 1 << 20

# pre-combined (category | role) file types
FILETYPE_CHANGELOG_MAIN = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_MAIN
FILETYPE_CHANGELOG_OTHER = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_OTHER
FILETYPE_MANIFESTLOG_MAIN = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_MAIN
FILETYPE_MANIFESTLOG_OTHER = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_OTHER
FILETYPE_FILELOG_MAIN = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_MAIN
FILETYPE_FILELOG_OTHER = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_OTHER
FILETYPE_OTHER = FILEFLAGS_OTHER
451
452
452
453
class basicstore(object):
    '''base class for local repository stores'''

    def __init__(self, path, vfstype):
        vfs = vfstype(path)
        self.path = vfs.base
        # mode for newly created store files (None keeps the default)
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        # the main vfs transparently encodes directory names
        self.vfs = vfsmod.filtervfs(vfs, encodedir)
        self.opener = self.vfs

    def join(self, f):
        """Return the filesystem path for store file *f* (dir-encoded)."""
        return self.path + b'/' + encodedir(f)

    def _walk(self, relpath, recurse):
        '''yields (revlog_type, unencoded, encoded, size)'''
        path = self.path
        if relpath:
            path += b'/' + relpath
        striplen = len(self.path) + 1
        l = []
        if self.rawvfs.isdir(path):
            visit = [path]
            readdir = self.rawvfs.readdir
            while visit:
                p = visit.pop()
                for f, kind, st in readdir(p, stat=True):
                    fp = p + b'/' + f
                    rl_type = is_revlog(f, kind, st)
                    if rl_type is not None:
                        n = util.pconvert(fp[striplen:])
                        l.append((rl_type, decodedir(n), n, st.st_size))
                    elif kind == stat.S_IFDIR and recurse:
                        visit.append(fp)
        l.sort()
        return l

    def changelog(self, trypending, concurrencychecker=None):
        """Instantiate the changelog for this store."""
        return changelog.changelog(
            self.vfs,
            trypending=trypending,
            concurrencychecker=concurrencychecker,
        )

    def manifestlog(self, repo, storenarrowmatch):
        """Instantiate the manifest log for this store."""
        rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs)
        return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)

    def datafiles(self, matcher=None):
        """Yield (file_type, unencoded, encoded, size) for filelog files.

        Note: *matcher* is accepted for interface compatibility but not
        applied here; subclasses override this with matcher support.
        """
        files = self._walk(b'data', True) + self._walk(b'meta', True)
        for (t, u, e, s) in files:
            yield (FILEFLAGS_FILELOG | t, u, e, s)

    def topfiles(self):
        """Yield (file_type, unencoded, encoded, size) for top-level files."""
        # yield manifest before changelog
        files = reversed(self._walk(b'', False))
        for (t, u, e, s) in files:
            if u.startswith(b'00changelog'):
                yield (FILEFLAGS_CHANGELOG | t, u, e, s)
            elif u.startswith(b'00manifest'):
                yield (FILEFLAGS_MANIFESTLOG | t, u, e, s)
            else:
                yield (FILETYPE_OTHER | t, u, e, s)

    def walk(self, matcher=None):
        """return file related to data storage (ie: revlogs)

        yields (file_type, unencoded, encoded, size)

        if a matcher is passed, storage files of only those tracked paths
        are passed with matches the matcher
        """
        # yield data files first
        for x in self.datafiles(matcher):
            yield x
        for x in self.topfiles():
            yield x

    def copylist(self):
        """Return the list of store paths to copy when cloning."""
        return _data

    def write(self, tr):
        # no persistent state to flush in the basic store
        pass

    def invalidatecaches(self):
        # no caches in the basic store
        pass

    def markremoved(self, fn):
        # the basic store does not track file removal
        pass

    def __contains__(self, path):
        '''Checks if the store contains path'''
        path = b"/".join((b"data", path))
        # file?
        if self.vfs.exists(path + b".i"):
            return True
        # dir?
        if not path.endswith(b"/"):
            path = path + b"/"
        return self.vfs.exists(path)
554
555
555
556
class encodedstore(basicstore):
    """Store living under '.hg/store' with reversible filename encoding."""

    def __init__(self, path, vfstype):
        vfs = vfstype(path + b'/store')
        self.path = vfs.base
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        # every filename is fully encoded on its way to disk
        self.vfs = vfsmod.filtervfs(vfs, encodefilename)
        self.opener = self.vfs

    def datafiles(self, matcher=None):
        # decode on-disk names back to repository paths; names that fail
        # to decode are yielded with a None unencoded path
        for t, a, b, size in super(encodedstore, self).datafiles():
            try:
                a = decodefilename(a)
            except KeyError:
                a = None
            if a is not None and not _matchtrackedpath(a, matcher):
                continue
            yield t, a, b, size

    def join(self, f):
        """Return the filesystem path for store file *f* (fully encoded)."""
        return self.path + b'/' + encodefilename(f)

    def copylist(self):
        """Return the list of paths to copy when cloning this store."""
        return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data]
581
582
582
583
class fncache(object):
    """In-memory view of the 'fncache' file listing store filenames."""

    # the filename used to be partially encoded
    # hence the encodedir/decodedir dance
    def __init__(self, vfs):
        self.vfs = vfs
        # lazily loaded set of known entries; None until _load() runs
        self.entries = None
        # True when entries were removed and a full rewrite is needed
        self._dirty = False
        # set of new additions to fncache
        self.addls = set()

    def ensureloaded(self, warn=None):
        """read the fncache file if not already read.

        If the file on disk is corrupted, raise. If warn is provided,
        warn and keep going instead."""
        if self.entries is None:
            self._load(warn)

    def _load(self, warn=None):
        '''fill the entries from the fncache file'''
        self._dirty = False
        try:
            fp = self.vfs(b'fncache', mode=b'rb')
        except IOError:
            # skip nonexistent file
            self.entries = set()
            return

        self.entries = set()
        chunk = b''
        # read in fixed-size chunks and split on complete lines only
        for c in iter(functools.partial(fp.read, fncache_chunksize), b''):
            chunk += c
            try:
                p = chunk.rindex(b'\n')
                self.entries.update(decodedir(chunk[: p + 1]).splitlines())
                chunk = chunk[p + 1 :]
            except ValueError:
                # substring '\n' not found, maybe the entry is bigger than the
                # chunksize, so let's keep iterating
                pass

        if chunk:
            # leftover bytes: the file did not end with a newline
            msg = _(b"fncache does not ends with a newline")
            if warn:
                warn(msg + b'\n')
            else:
                raise error.Abort(
                    msg,
                    hint=_(
                        b"use 'hg debugrebuildfncache' to "
                        b"rebuild the fncache"
                    ),
                )
        self._checkentries(fp, warn)
        fp.close()

    def _checkentries(self, fp, warn):
        """make sure there is no empty string in entries"""
        if b'' in self.entries:
            # re-scan the file to report the offending line number
            fp.seek(0)
            for n, line in enumerate(util.iterfile(fp)):
                if not line.rstrip(b'\n'):
                    t = _(b'invalid entry in fncache, line %d') % (n + 1)
                    if warn:
                        warn(t + b'\n')
                    else:
                        raise error.Abort(t)

    def write(self, tr):
        """Persist pending changes within transaction *tr*.

        A full rewrite happens when entries were removed (_dirty);
        otherwise new entries are simply appended.
        """
        if self._dirty:
            assert self.entries is not None
            self.entries = self.entries | self.addls
            self.addls = set()
            tr.addbackup(b'fncache')
            fp = self.vfs(b'fncache', mode=b'wb', atomictemp=True)
            if self.entries:
                fp.write(encodedir(b'\n'.join(self.entries) + b'\n'))
            fp.close()
            self._dirty = False
        if self.addls:
            # if we have just new entries, let's append them to the fncache
            tr.addbackup(b'fncache')
            fp = self.vfs(b'fncache', mode=b'ab', atomictemp=True)
            if self.addls:
                fp.write(encodedir(b'\n'.join(self.addls) + b'\n'))
            fp.close()
            self.entries = None
            self.addls = set()

    def add(self, fn):
        """Record *fn* as present in the store (no-op if already known)."""
        if self.entries is None:
            self._load()
        if fn not in self.entries:
            self.addls.add(fn)

    def remove(self, fn):
        """Forget *fn*; unknown names are silently ignored."""
        if self.entries is None:
            self._load()
        if fn in self.addls:
            self.addls.remove(fn)
            return
        try:
            self.entries.remove(fn)
            self._dirty = True
        except KeyError:
            pass

    def __contains__(self, fn):
        if fn in self.addls:
            return True
        if self.entries is None:
            self._load()
        return fn in self.entries

    def __iter__(self):
        if self.entries is None:
            self._load()
        return iter(self.entries | self.addls)
701
702
702
703
class _fncachevfs(vfsmod.proxyvfs):
    """vfs proxy that records newly written store files in the fncache."""

    def __init__(self, vfs, fnc, encode):
        vfsmod.proxyvfs.__init__(self, vfs)
        # the fncache instance to register new files with
        self.fncache = fnc
        # path-encoding function applied before hitting the real vfs
        self.encode = encode

    def __call__(self, path, mode=b'r', *args, **kw):
        encoded = self.encode(path)
        # only writes under data/ or meta/ need fncache registration
        if mode not in (b'r', b'rb') and (
            path.startswith(b'data/') or path.startswith(b'meta/')
        ):
            # do not trigger a fncache load when adding a file that already is
            # known to exist.
            notload = self.fncache.entries is None and self.vfs.exists(encoded)
            if notload and b'r+' in mode and not self.vfs.stat(encoded).st_size:
                # when appending to an existing file, if the file has size zero,
                # it should be considered as missing. Such zero-size files are
                # the result of truncation when a transaction is aborted.
                notload = False
            if not notload:
                self.fncache.add(path)
        return self.vfs(encoded, mode, *args, **kw)

    def join(self, path):
        """Return the filesystem path for *path*, encoding non-empty paths."""
        if path:
            return self.vfs.join(self.encode(path))
        else:
            return self.vfs.join(path)
731
732
732
733
class fncachestore(basicstore):
    """Store flavour that records every tracked file in an 'fncache' file,
    using hashed/encoded paths on disk to dodge filesystem limitations."""

    def __init__(self, path, vfstype, dotencode):
        # pick the path-encoding scheme based on the dotencode requirement
        encode = _pathencode if dotencode else _plainhybridencode
        self.encode = encode
        vfs = vfstype(path + b'/store')
        self.path = vfs.base
        self.pathsep = self.path + b'/'
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        fnc = fncache(vfs)
        self.fncache = fnc
        self.vfs = _fncachevfs(vfs, fnc, encode)
        self.opener = self.vfs

    def join(self, f):
        """Return the on-disk (encoded) path for store-relative name *f*."""
        return self.pathsep + self.encode(f)

    def getsize(self, path):
        return self.rawvfs.stat(path).st_size

    def datafiles(self, matcher=None):
        """Yield (type, name, encoded-name, size) for each tracked revlog
        file; entries missing on disk (ENOENT) are silently skipped."""
        for f in sorted(self.fncache):
            if not _matchtrackedpath(f, matcher):
                continue
            ef = self.encode(f)
            try:
                t = revlog_type(f)
                assert t is not None, f
                t |= FILEFLAGS_FILELOG
                yield t, f, ef, self.getsize(ef)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise

    def copylist(self):
        store_files = (
            b'bookmarks',
            b'narrowspec',
            b'data',
            b'meta',
            b'dh',
            b'fncache',
            b'phaseroots',
            b'obsstore',
            b'00manifest.d',
            b'00manifest.i',
            b'00changelog.d',
            b'00changelog.i',
            b'requires',
        )
        result = [b'requires', b'00changelog.i']
        result.extend(b'store/' + f for f in store_files)
        return result

    def write(self, tr):
        self.fncache.write(tr)

    def invalidatecaches(self):
        # drop both the loaded entries and any pending additions
        self.fncache.entries = None
        self.fncache.addls = set()

    def markremoved(self, fn):
        self.fncache.remove(fn)

    def _exists(self, f):
        """Return True if the encoded form of *f* exists on disk."""
        ef = self.encode(f)
        try:
            self.getsize(ef)
            return True
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # nonexistent entry
            return False

    def __contains__(self, path):
        '''Checks if the store contains path'''
        path = b"/".join((b"data", path))
        # check for files (exact match)
        index_name = path + b'.i'
        if index_name in self.fncache and self._exists(index_name):
            return True
        # now check for directories (prefix match)
        prefix = path if path.endswith(b'/') else path + b'/'
        return any(
            e.startswith(prefix) and self._exists(e) for e in self.fncache
        )
@@ -1,94 +1,96 b''
1 #require reporevlogstore
1 #require reporevlogstore
2
2
3 A repo with unknown revlogv2 requirement string cannot be opened
3 A repo with unknown revlogv2 requirement string cannot be opened
4
4
5 $ hg init invalidreq
5 $ hg init invalidreq
6 $ cd invalidreq
6 $ cd invalidreq
7 $ echo exp-revlogv2.unknown >> .hg/requires
7 $ echo exp-revlogv2.unknown >> .hg/requires
8 $ hg log
8 $ hg log
9 abort: repository requires features unknown to this Mercurial: exp-revlogv2.unknown
9 abort: repository requires features unknown to this Mercurial: exp-revlogv2.unknown
10 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
10 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
11 [255]
11 [255]
12 $ cd ..
12 $ cd ..
13
13
14 Can create and open repo with revlog v2 requirement
14 Can create and open repo with revlog v2 requirement
15
15
16 $ cat >> $HGRCPATH << EOF
16 $ cat >> $HGRCPATH << EOF
17 > [experimental]
17 > [experimental]
18 > revlogv2 = enable-unstable-format-and-corrupt-my-data
18 > revlogv2 = enable-unstable-format-and-corrupt-my-data
19 > EOF
19 > EOF
20
20
21 $ hg init empty-repo
21 $ hg init empty-repo
22 $ cd empty-repo
22 $ cd empty-repo
23 $ cat .hg/requires
23 $ cat .hg/requires
24 dotencode
24 dotencode
25 exp-dirstate-v2 (dirstate-v2 !)
25 exp-dirstate-v2 (dirstate-v2 !)
26 exp-revlogv2.2
26 exp-revlogv2.2
27 fncache
27 fncache
28 generaldelta
28 generaldelta
29 persistent-nodemap (rust !)
29 persistent-nodemap (rust !)
30 revlog-compression-zstd (zstd !)
30 revlog-compression-zstd (zstd !)
31 sparserevlog
31 sparserevlog
32 store
32 store
33
33
34 $ hg log
34 $ hg log
35
35
36 Unknown flags to revlog are rejected
36 Unknown flags to revlog are rejected
37
37
38 >>> with open('.hg/store/00changelog.i', 'wb') as fh:
38 >>> with open('.hg/store/00changelog.i', 'wb') as fh:
39 ... fh.write(b'\xff\x00\xde\xad') and None
39 ... fh.write(b'\xff\x00\xde\xad') and None
40
40
41 $ hg log
41 $ hg log
42 abort: unknown flags (0xff00) in version 57005 revlog 00changelog
42 abort: unknown flags (0xff00) in version 57005 revlog 00changelog
43 [50]
43 [50]
44
44
45 $ cd ..
45 $ cd ..
46
46
47 Writing a simple revlog v2 works
47 Writing a simple revlog v2 works
48
48
49 $ hg init simple
49 $ hg init simple
50 $ cd simple
50 $ cd simple
51 $ touch foo
51 $ touch foo
52 $ hg -q commit -A -m initial
52 $ hg -q commit -A -m initial
53
53
54 $ hg log
54 $ hg log
55 changeset: 0:96ee1d7354c4
55 changeset: 0:96ee1d7354c4
56 tag: tip
56 tag: tip
57 user: test
57 user: test
58 date: Thu Jan 01 00:00:00 1970 +0000
58 date: Thu Jan 01 00:00:00 1970 +0000
59 summary: initial
59 summary: initial
60
60
61
61
62 Header written as expected
62 Header written as expected
63
63
64 $ f --hexdump --bytes 4 .hg/store/00changelog.i
64 $ f --hexdump --bytes 4 .hg/store/00changelog.i
65 .hg/store/00changelog.i:
65 .hg/store/00changelog.i:
66 0000: 00 00 de ad |....|
66 0000: 00 00 de ad |....|
67
67
68 $ f --hexdump --bytes 4 .hg/store/data/foo.i
68 $ f --hexdump --bytes 4 .hg/store/data/foo.i
69 .hg/store/data/foo.i:
69 .hg/store/data/foo.i:
70 0000: 00 00 de ad |....|
70 0000: 00 00 de ad |....|
71
71
72 Bundle use a compatible changegroup format
72 Bundle use a compatible changegroup format
73 ------------------------------------------
73 ------------------------------------------
74
74
75 $ hg bundle --all ../basic.hg
75 $ hg bundle --all ../basic.hg
76 1 changesets found
76 1 changesets found
77 $ hg debugbundle --spec ../basic.hg
77 $ hg debugbundle --spec ../basic.hg
78 bzip2-v2
78 bzip2-v2
79
79
80 The expected files are generated
80 The expected files are generated
81 --------------------------------
81 --------------------------------
82
82
83 We should have:
83 We should have:
84 - a docket
84 - a docket
85 - a index file with a unique name
85 - a index file with a unique name
86 - a data file
86 - a data file
87
87
88 $ ls .hg/store/00changelog* .hg/store/00manifest*
88 $ ls .hg/store/00changelog* .hg/store/00manifest*
89 .hg/store/00changelog-6b8ab34b.dat
89 .hg/store/00changelog-1335303a.sda
90 .hg/store/00changelog-88698448.idx
90 .hg/store/00changelog-6b8ab34b.idx
91 .hg/store/00changelog-b875dfc5.dat
91 .hg/store/00changelog.i
92 .hg/store/00changelog.i
92 .hg/store/00manifest-1335303a.dat
93 .hg/store/00manifest-05a21d65.idx
93 .hg/store/00manifest-b875dfc5.idx
94 .hg/store/00manifest-43c37dde.dat
95 .hg/store/00manifest-e2c9362a.sda
94 .hg/store/00manifest.i
96 .hg/store/00manifest.i
General Comments 0
You need to be logged in to leave comments. Login now