nodemap: make sure hooks have access to an up-to-date version...
marmoute - r45003:64e2f603 default
@@ -1,1579 +1,1578
# configitems.py - centralized declaration of configuration options
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools
import re

from . import (
    encoding,
    error,
)

def loadconfigtable(ui, extname, configtable):
    """update config items known to the ui with the extension ones"""
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            msg = b"extension '%s' overwrites config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config=b'warn-config')

        knownitems.update(items)

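# Illustrative sketch (an assumption about typical usage, not part of this
# change): an extension normally builds its `configtable` through
# `registrar.configitem`, and the extension loader then hands it to
# `loadconfigtable`. The extension name and item below are hypothetical.
#
#     from mercurial import registrar
#
#     configtable = {}
#     configitem = registrar.configitem(configtable)
#     configitem(b'myext', b'threshold', default=10)
#
#     # during extension loading, core does the equivalent of:
#     # loadconfigtable(ui, b'myext', configtable)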
class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition; the name is matched as a regular
              expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        self._re = None
        if generic:
            self._re = re.compile(self.name)

class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        baseitem = super(itemregister, self).get(key)
        if baseitem is not None and not baseitem.generic:
            return baseitem

        # search for a matching generic item
        generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
        for item in generics:
            # we use 'match' instead of 'search' to make the matching simpler
            # for people unfamiliar with regular expressions. Having the match
            # rooted at the start of the string produces less surprising
            # results for users writing simple regexes for sub-attributes.
            #
            # For example, using "color\..*" with match produces an
            # unsurprising result, while using search could suddenly match
            # apparently unrelated configuration that happens to contain
            # "color." anywhere. This is a tradeoff where we favor requiring
            # ".*" on some patterns to avoid the need to prefix most patterns
            # with "^". The "^" seems more error prone.
            if item._re.match(key):
                return item

        return None

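# Illustrative sketch of the wild-card lookup above (the entries are
# hypothetical; the real registers are populated below via coreconfigitem):
#
#     reg = itemregister()
#     reg[b'mode'] = configitem(b'color', b'mode', default=b'auto')
#     reg[b'.*'] = configitem(b'color', b'.*', default=None, generic=True)
#     reg.get(b'mode')    # exact, non-generic entry wins immediately
#     reg.get(b'branch')  # no exact entry; the generic b'.*' item matches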
coreitems = {}


def _register(configtable, *args, **kwargs):
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = b"duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item


# special value for the case where the default is derived from other values
dynamicdefault = object()
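# Illustrative sketch: `dynamicdefault` is an identity sentinel, so consumers
# are expected to test it with `is` rather than `==`. The example relies on
# the b'profiling.showmin' registration that appears further down:
#
#     item = coreitems[b'profiling'].get(b'showmin')
#     if item.default is dynamicdefault:
#         pass  # compute the effective default from other settings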

# Registering actual config items


def getitemregister(configtable):
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f


coreconfigitem = getitemregister(coreitems)

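# Note: `coreconfigitem` is simply `_register` partially applied to
# `coreitems`, and it re-exports the sentinel, so
# `coreconfigitem.dynamicdefault is dynamicdefault` holds.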

def _registerdiffopts(section, configprefix=b''):
    coreconfigitem(
        section, configprefix + b'nodates', default=False,
    )
    coreconfigitem(
        section, configprefix + b'showfunc', default=False,
    )
    coreconfigitem(
        section, configprefix + b'unified', default=None,
    )
    coreconfigitem(
        section, configprefix + b'git', default=False,
    )
    coreconfigitem(
        section, configprefix + b'ignorews', default=False,
    )
    coreconfigitem(
        section, configprefix + b'ignorewsamount', default=False,
    )
    coreconfigitem(
        section, configprefix + b'ignoreblanklines', default=False,
    )
    coreconfigitem(
        section, configprefix + b'ignorewseol', default=False,
    )
    coreconfigitem(
        section, configprefix + b'nobinary', default=False,
    )
    coreconfigitem(
        section, configprefix + b'noprefix', default=False,
    )
    coreconfigitem(
        section, configprefix + b'word-diff', default=False,
    )

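# Illustrative: `_registerdiffopts(section=b'annotate')` below registers
# b'annotate.nodates', b'annotate.showfunc', and so on, while a prefix such
# as configprefix=b'commit.interactive.' yields keys like
# b'commands.commit.interactive.showfunc'.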
coreconfigitem(
    b'alias', b'.*', default=dynamicdefault, generic=True,
)
coreconfigitem(
    b'auth', b'cookiefile', default=None,
)
_registerdiffopts(section=b'annotate')
# bookmarks.pushing: internal hack for discovery
coreconfigitem(
    b'bookmarks', b'pushing', default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem(
    b'bundle', b'mainreporoot', default=b'',
)
coreconfigitem(
    b'censor', b'policy', default=b'abort', experimental=True,
)
coreconfigitem(
    b'chgserver', b'idletimeout', default=3600,
)
coreconfigitem(
    b'chgserver', b'skiphash', default=False,
)
coreconfigitem(
    b'cmdserver', b'log', default=None,
)
coreconfigitem(
    b'cmdserver', b'max-log-files', default=7,
)
coreconfigitem(
    b'cmdserver', b'max-log-size', default=b'1 MB',
)
coreconfigitem(
    b'cmdserver', b'max-repo-cache', default=0, experimental=True,
)
coreconfigitem(
    b'cmdserver', b'message-encodings', default=list, experimental=True,
)
coreconfigitem(
    b'cmdserver',
    b'track-log',
    default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
)
coreconfigitem(
    b'color', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'color', b'mode', default=b'auto',
)
coreconfigitem(
    b'color', b'pagermode', default=dynamicdefault,
)
_registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
coreconfigitem(
    b'commands', b'commit.post-status', default=False,
)
coreconfigitem(
    b'commands', b'grep.all-files', default=False, experimental=True,
)
coreconfigitem(
    b'commands', b'merge.require-rev', default=False,
)
coreconfigitem(
    b'commands', b'push.require-revs', default=False,
)
coreconfigitem(
    b'commands', b'resolve.confirm', default=False,
)
coreconfigitem(
    b'commands', b'resolve.explicit-re-merge', default=False,
)
coreconfigitem(
    b'commands', b'resolve.mark-check', default=b'none',
)
_registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
coreconfigitem(
    b'commands', b'show.aliasprefix', default=list,
)
coreconfigitem(
    b'commands', b'status.relative', default=False,
)
coreconfigitem(
    b'commands', b'status.skipstates', default=[], experimental=True,
)
coreconfigitem(
    b'commands', b'status.terse', default=b'',
)
coreconfigitem(
    b'commands', b'status.verbose', default=False,
)
coreconfigitem(
    b'commands', b'update.check', default=None,
)
coreconfigitem(
    b'commands', b'update.requiredest', default=False,
)
coreconfigitem(
    b'committemplate', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'convert', b'bzr.saverev', default=True,
)
coreconfigitem(
    b'convert', b'cvsps.cache', default=True,
)
coreconfigitem(
    b'convert', b'cvsps.fuzz', default=60,
)
coreconfigitem(
    b'convert', b'cvsps.logencoding', default=None,
)
coreconfigitem(
    b'convert', b'cvsps.mergefrom', default=None,
)
coreconfigitem(
    b'convert', b'cvsps.mergeto', default=None,
)
coreconfigitem(
    b'convert', b'git.committeractions', default=lambda: [b'messagedifferent'],
)
coreconfigitem(
    b'convert', b'git.extrakeys', default=list,
)
coreconfigitem(
    b'convert', b'git.findcopiesharder', default=False,
)
coreconfigitem(
    b'convert', b'git.remoteprefix', default=b'remote',
)
coreconfigitem(
    b'convert', b'git.renamelimit', default=400,
)
coreconfigitem(
    b'convert', b'git.saverev', default=True,
)
coreconfigitem(
    b'convert', b'git.similarity', default=50,
)
coreconfigitem(
    b'convert', b'git.skipsubmodules', default=False,
)
coreconfigitem(
    b'convert', b'hg.clonebranches', default=False,
)
coreconfigitem(
    b'convert', b'hg.ignoreerrors', default=False,
)
coreconfigitem(
    b'convert', b'hg.preserve-hash', default=False,
)
coreconfigitem(
    b'convert', b'hg.revs', default=None,
)
coreconfigitem(
    b'convert', b'hg.saverev', default=False,
)
coreconfigitem(
    b'convert', b'hg.sourcename', default=None,
)
coreconfigitem(
    b'convert', b'hg.startrev', default=None,
)
coreconfigitem(
    b'convert', b'hg.tagsbranch', default=b'default',
)
coreconfigitem(
    b'convert', b'hg.usebranchnames', default=True,
)
coreconfigitem(
    b'convert', b'ignoreancestorcheck', default=False, experimental=True,
)
coreconfigitem(
    b'convert', b'localtimezone', default=False,
)
coreconfigitem(
    b'convert', b'p4.encoding', default=dynamicdefault,
)
coreconfigitem(
    b'convert', b'p4.startrev', default=0,
)
coreconfigitem(
    b'convert', b'skiptags', default=False,
)
coreconfigitem(
    b'convert', b'svn.debugsvnlog', default=True,
)
coreconfigitem(
    b'convert', b'svn.trunk', default=None,
)
coreconfigitem(
    b'convert', b'svn.tags', default=None,
)
coreconfigitem(
    b'convert', b'svn.branches', default=None,
)
coreconfigitem(
    b'convert', b'svn.startrev', default=0,
)
coreconfigitem(
    b'debug', b'dirstate.delaywrite', default=0,
)
coreconfigitem(
    b'defaults', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'devel', b'all-warnings', default=False,
)
coreconfigitem(
    b'devel', b'bundle2.debug', default=False,
)
coreconfigitem(
    b'devel', b'bundle.delta', default=b'',
)
coreconfigitem(
    b'devel', b'cache-vfs', default=None,
)
coreconfigitem(
    b'devel', b'check-locks', default=False,
)
coreconfigitem(
    b'devel', b'check-relroot', default=False,
)
coreconfigitem(
    b'devel', b'default-date', default=None,
)
coreconfigitem(
    b'devel', b'deprec-warn', default=False,
)
coreconfigitem(
    b'devel', b'disableloaddefaultcerts', default=False,
)
coreconfigitem(
    b'devel', b'warn-empty-changegroup', default=False,
)
coreconfigitem(
    b'devel', b'legacy.exchange', default=list,
)
# TODO before getting `persistent-nodemap` out of experimental
#
# * code/tests around aborted transaction
-# * code/tests around pending data for hooks
# * regenerate a new nodemap when the unused/total ratio is too high
# * decide on a "status" of the persistent nodemap and associated location
#   - part of the store next to the revlog itself (new requirements)
#   - part of the cache directory
#   - part of an `index` directory
#     (https://www.mercurial-scm.org/wiki/ComputedIndexPlan)
# * do we want to use this for more than just the changelog? if so we need:
#   - simpler "pending" logic for them
#   - double check the memory story (we don't want to keep all revlogs in
#     memory)
#   - think about the naming scheme if we are in "cache"
# * increment the version format to "1" and freeze it.
coreconfigitem(
    b'devel', b'persistent-nodemap', default=False,
)
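# Illustrative sketch (an assumption, for tests/debugging): together with the
# `experimental.exp-persistent-nodemap` switch registered further down, an
# hgrc enabling the persistent nodemap might look like:
#
#     [devel]
#     persistent-nodemap = yes
#
#     [experimental]
#     exp-persistent-nodemap = yes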
coreconfigitem(
    b'devel', b'servercafile', default=b'',
)
coreconfigitem(
    b'devel', b'serverexactprotocol', default=b'',
)
coreconfigitem(
    b'devel', b'serverrequirecert', default=False,
)
coreconfigitem(
    b'devel', b'strip-obsmarkers', default=True,
)
coreconfigitem(
    b'devel', b'warn-config', default=None,
)
coreconfigitem(
    b'devel', b'warn-config-default', default=None,
)
coreconfigitem(
    b'devel', b'user.obsmarker', default=None,
)
coreconfigitem(
    b'devel', b'warn-config-unknown', default=None,
)
coreconfigitem(
    b'devel', b'debug.copies', default=False,
)
coreconfigitem(
    b'devel', b'debug.extensions', default=False,
)
coreconfigitem(
    b'devel', b'debug.repo-filters', default=False,
)
coreconfigitem(
    b'devel', b'debug.peer-request', default=False,
)
coreconfigitem(
    b'devel', b'discovery.randomize', default=True,
)
_registerdiffopts(section=b'diff')
coreconfigitem(
    b'email', b'bcc', default=None,
)
coreconfigitem(
    b'email', b'cc', default=None,
)
coreconfigitem(
    b'email', b'charsets', default=list,
)
coreconfigitem(
    b'email', b'from', default=None,
)
coreconfigitem(
    b'email', b'method', default=b'smtp',
)
coreconfigitem(
    b'email', b'reply-to', default=None,
)
coreconfigitem(
    b'email', b'to', default=None,
)
coreconfigitem(
    b'experimental', b'archivemetatemplate', default=dynamicdefault,
)
coreconfigitem(
    b'experimental', b'auto-publish', default=b'publish',
)
coreconfigitem(
    b'experimental', b'bundle-phases', default=False,
)
coreconfigitem(
    b'experimental', b'bundle2-advertise', default=True,
)
coreconfigitem(
    b'experimental', b'bundle2-output-capture', default=False,
)
coreconfigitem(
    b'experimental', b'bundle2.pushback', default=False,
)
coreconfigitem(
    b'experimental', b'bundle2lazylocking', default=False,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.bzip2', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.gzip', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.none', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.zstd', default=None,
)
coreconfigitem(
    b'experimental', b'changegroup3', default=False,
)
coreconfigitem(
    b'experimental', b'cleanup-as-archived', default=False,
)
coreconfigitem(
    b'experimental', b'clientcompressionengines', default=list,
)
coreconfigitem(
    b'experimental', b'copytrace', default=b'on',
)
coreconfigitem(
    b'experimental', b'copytrace.movecandidateslimit', default=100,
)
coreconfigitem(
    b'experimental', b'copytrace.sourcecommitlimit', default=100,
)
coreconfigitem(
    b'experimental', b'copies.read-from', default=b"filelog-only",
)
coreconfigitem(
    b'experimental', b'copies.write-to', default=b'filelog-only',
)
coreconfigitem(
    b'experimental', b'crecordtest', default=None,
)
coreconfigitem(
    b'experimental', b'directaccess', default=False,
)
coreconfigitem(
    b'experimental', b'directaccess.revnums', default=False,
)
coreconfigitem(
    b'experimental', b'editortmpinhg', default=False,
)
coreconfigitem(
    b'experimental', b'evolution', default=list,
)
coreconfigitem(
    b'experimental',
    b'evolution.allowdivergence',
    default=False,
    alias=[(b'experimental', b'allowdivergence')],
)
coreconfigitem(
    b'experimental', b'evolution.allowunstable', default=None,
)
coreconfigitem(
    b'experimental', b'evolution.createmarkers', default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.effect-flags',
    default=True,
    alias=[(b'experimental', b'effect-flags')],
)
coreconfigitem(
    b'experimental', b'evolution.exchange', default=None,
)
coreconfigitem(
    b'experimental', b'evolution.bundle-obsmarker', default=False,
)
coreconfigitem(
    b'experimental', b'log.topo', default=False,
)
coreconfigitem(
    b'experimental', b'evolution.report-instabilities', default=True,
)
coreconfigitem(
    b'experimental', b'evolution.track-operation', default=True,
)
# repo-level config to exclude a revset from visibility
#
# The target use case is to use `share` to expose different subsets of the
# same repository, especially server side. See also `server.view`.
coreconfigitem(
    b'experimental', b'extra-filter-revs', default=None,
)
coreconfigitem(
    b'experimental', b'maxdeltachainspan', default=-1,
)
coreconfigitem(
    b'experimental', b'mergetempdirprefix', default=None,
)
coreconfigitem(
    b'experimental', b'mmapindexthreshold', default=None,
)
coreconfigitem(
    b'experimental', b'narrow', default=False,
)
coreconfigitem(
    b'experimental', b'nonnormalparanoidcheck', default=False,
)
coreconfigitem(
    b'experimental', b'exportableenviron', default=list,
)
coreconfigitem(
    b'experimental', b'extendedheader.index', default=None,
)
coreconfigitem(
    b'experimental', b'extendedheader.similarity', default=False,
)
coreconfigitem(
    b'experimental', b'graphshorten', default=False,
)
coreconfigitem(
    b'experimental', b'graphstyle.parent', default=dynamicdefault,
)
coreconfigitem(
    b'experimental', b'graphstyle.missing', default=dynamicdefault,
)
coreconfigitem(
    b'experimental', b'graphstyle.grandparent', default=dynamicdefault,
)
coreconfigitem(
    b'experimental', b'hook-track-tags', default=False,
)
coreconfigitem(
    b'experimental', b'httppeer.advertise-v2', default=False,
)
coreconfigitem(
    b'experimental', b'httppeer.v2-encoder-order', default=None,
)
coreconfigitem(
    b'experimental', b'httppostargs', default=False,
)
coreconfigitem(
    b'experimental', b'mergedriver', default=None,
)
coreconfigitem(b'experimental', b'nointerrupt', default=False)
coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)

coreconfigitem(
    b'experimental', b'obsmarkers-exchange-debug', default=False,
)
coreconfigitem(
    b'experimental', b'remotenames', default=False,
)
coreconfigitem(
    b'experimental', b'removeemptydirs', default=True,
)
coreconfigitem(
    b'experimental', b'revert.interactive.select-to-keep', default=False,
)
coreconfigitem(
    b'experimental', b'revisions.prefixhexnode', default=False,
)
coreconfigitem(
    b'experimental', b'revlogv2', default=None,
)
coreconfigitem(
    b'experimental', b'revisions.disambiguatewithin', default=None,
)
coreconfigitem(
    b'experimental', b'rust.index', default=False,
)
coreconfigitem(
    b'experimental', b'exp-persistent-nodemap', default=False,
)
coreconfigitem(
    b'experimental', b'exp-persistent-nodemap.mmap', default=True,
)
coreconfigitem(
    b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
)
coreconfigitem(
    b'experimental',
    b'server.manifestdata.recommended-batch-size',
    default=100000,
)
coreconfigitem(
    b'experimental', b'server.stream-narrow-clones', default=False,
)
coreconfigitem(
    b'experimental', b'single-head-per-branch', default=False,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch:account-closed-heads',
    default=False,
)
coreconfigitem(
    b'experimental', b'sshserver.support-v2', default=False,
)
coreconfigitem(
    b'experimental', b'sparse-read', default=False,
)
coreconfigitem(
    b'experimental', b'sparse-read.density-threshold', default=0.50,
)
coreconfigitem(
    b'experimental', b'sparse-read.min-gap-size', default=b'65K',
)
coreconfigitem(
    b'experimental', b'treemanifest', default=False,
)
coreconfigitem(
    b'experimental', b'update.atomic-file', default=False,
)
coreconfigitem(
    b'experimental', b'sshpeer.advertise-v2', default=False,
)
coreconfigitem(
    b'experimental', b'web.apiserver', default=False,
)
coreconfigitem(
    b'experimental', b'web.api.http-v2', default=False,
)
coreconfigitem(
    b'experimental', b'web.api.debugreflect', default=False,
)
coreconfigitem(
    b'experimental', b'worker.wdir-get-thread-safe', default=False,
)
coreconfigitem(
    b'experimental', b'worker.repository-upgrade', default=False,
)
coreconfigitem(
    b'experimental', b'xdiff', default=False,
)
coreconfigitem(
    b'extensions', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'extdata', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'format', b'bookmarks-in-store', default=False,
)
coreconfigitem(
    b'format', b'chunkcachesize', default=None, experimental=True,
)
coreconfigitem(
    b'format', b'dotencode', default=True,
)
coreconfigitem(
    b'format', b'generaldelta', default=False, experimental=True,
)
coreconfigitem(
    b'format', b'manifestcachesize', default=None, experimental=True,
)
coreconfigitem(
    b'format', b'maxchainlen', default=dynamicdefault, experimental=True,
)
coreconfigitem(
    b'format', b'obsstore-version', default=None,
)
coreconfigitem(
    b'format', b'sparse-revlog', default=True,
)
coreconfigitem(
    b'format',
    b'revlog-compression',
    default=lambda: [b'zlib'],
    alias=[(b'experimental', b'format.compression')],
)
coreconfigitem(
    b'format', b'usefncache', default=True,
)
coreconfigitem(
    b'format', b'usegeneraldelta', default=True,
)
coreconfigitem(
    b'format', b'usestore', default=True,
)
coreconfigitem(
    b'format',
    b'exp-use-copies-side-data-changeset',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format', b'exp-use-side-data', default=False, experimental=True,
)
coreconfigitem(
    b'format', b'internal-phase', default=False, experimental=True,
)
coreconfigitem(
    b'fsmonitor', b'warn_when_unused', default=True,
)
coreconfigitem(
    b'fsmonitor', b'warn_update_file_count', default=50000,
)
coreconfigitem(
    b'help', br'hidden-command\..*', default=False, generic=True,
)
coreconfigitem(
    b'help', br'hidden-topic\..*', default=False, generic=True,
)
coreconfigitem(
    b'hooks', b'.*', default=dynamicdefault, generic=True,
)
coreconfigitem(
    b'hgweb-paths', b'.*', default=list, generic=True,
)
coreconfigitem(
    b'hostfingerprints', b'.*', default=list, generic=True,
)
coreconfigitem(
    b'hostsecurity', b'ciphers', default=None,
)
coreconfigitem(
    b'hostsecurity', b'disabletls10warning', default=False,
)
coreconfigitem(
    b'hostsecurity', b'minimumprotocol', default=dynamicdefault,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:minimumprotocol$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hostsecurity', b'.*:ciphers$', default=dynamicdefault, generic=True,
)
coreconfigitem(
    b'hostsecurity', b'.*:fingerprints$', default=list, generic=True,
)
coreconfigitem(
    b'hostsecurity', b'.*:verifycertsfile$', default=None, generic=True,
)

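# Illustrative: the generic hostsecurity patterns above resolve per-host keys;
# e.g. a user-set `hostsecurity.example.com:minimumprotocol` (hostname
# hypothetical) is matched by the b'.*:minimumprotocol$' item.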
coreconfigitem(
    b'http_proxy', b'always', default=False,
)
coreconfigitem(
    b'http_proxy', b'host', default=None,
)
coreconfigitem(
    b'http_proxy', b'no', default=list,
)
coreconfigitem(
    b'http_proxy', b'passwd', default=None,
)
coreconfigitem(
    b'http_proxy', b'user', default=None,
)

coreconfigitem(
    b'http', b'timeout', default=None,
)

coreconfigitem(
    b'logtoprocess', b'commandexception', default=None,
)
coreconfigitem(
    b'logtoprocess', b'commandfinish', default=None,
)
coreconfigitem(
    b'logtoprocess', b'command', default=None,
)
coreconfigitem(
    b'logtoprocess', b'develwarn', default=None,
)
coreconfigitem(
    b'logtoprocess', b'uiblocked', default=None,
)
coreconfigitem(
    b'merge', b'checkunknown', default=b'abort',
)
coreconfigitem(
    b'merge', b'checkignored', default=b'abort',
)
coreconfigitem(
    b'experimental', b'merge.checkpathconflicts', default=False,
)
coreconfigitem(
    b'merge', b'followcopies', default=True,
)
coreconfigitem(
    b'merge', b'on-failure', default=b'continue',
)
coreconfigitem(
    b'merge', b'preferancestor', default=lambda: [b'*'], experimental=True,
)
coreconfigitem(
    b'merge', b'strict-capability-check', default=False,
)
coreconfigitem(
    b'merge-tools', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.args$',
    default=b"$local $base $other",
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.binary$', default=False, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.check$', default=list, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.checkchanged$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.executable$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.fixeol$', default=False, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.gui$', default=False, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkers$',
    default=b'basic',
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkertemplate$',
    default=dynamicdefault,  # take from ui.mergemarkertemplate
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.priority$', default=0, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.premerge$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.symlink$', default=False, generic=True, priority=-1,
)
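# Illustrative: itemregister.get() sorts generics by ascending (priority,
# name), so the priority=-1 suffix patterns above are consulted before the
# catch-all b'merge-tools', b'.*' item registered at the default priority 0.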
coreconfigitem(
    b'pager', b'attend-.*', default=dynamicdefault, generic=True,
)
coreconfigitem(
    b'pager', b'ignore', default=list,
)
coreconfigitem(
    b'pager', b'pager', default=dynamicdefault,
)
coreconfigitem(
    b'patch', b'eol', default=b'strict',
)
coreconfigitem(
    b'patch', b'fuzz', default=2,
)
coreconfigitem(
    b'paths', b'default', default=None,
)
coreconfigitem(
    b'paths', b'default-push', default=None,
)
coreconfigitem(
    b'paths', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'phases', b'checksubrepos', default=b'follow',
)
coreconfigitem(
    b'phases', b'new-commit', default=b'draft',
)
coreconfigitem(
    b'phases', b'publish', default=True,
)
coreconfigitem(
    b'profiling', b'enabled', default=False,
)
coreconfigitem(
    b'profiling', b'format', default=b'text',
)
coreconfigitem(
    b'profiling', b'freq', default=1000,
)
coreconfigitem(
    b'profiling', b'limit', default=30,
)
coreconfigitem(
    b'profiling', b'nested', default=0,
)
coreconfigitem(
    b'profiling', b'output', default=None,
)
coreconfigitem(
    b'profiling', b'showmax', default=0.999,
)
coreconfigitem(
    b'profiling', b'showmin', default=dynamicdefault,
)
coreconfigitem(
    b'profiling', b'showtime', default=True,
)
coreconfigitem(
    b'profiling', b'sort', default=b'inlinetime',
)
coreconfigitem(
    b'profiling', b'statformat', default=b'hotpath',
)
coreconfigitem(
    b'profiling', b'time-track', default=dynamicdefault,
)
coreconfigitem(
    b'profiling', b'type', default=b'stat',
)
coreconfigitem(
    b'progress', b'assume-tty', default=False,
)
coreconfigitem(
    b'progress', b'changedelay', default=1,
)
coreconfigitem(
    b'progress', b'clear-complete', default=True,
)
coreconfigitem(
    b'progress', b'debug', default=False,
)
coreconfigitem(
    b'progress', b'delay', default=3,
)
coreconfigitem(
    b'progress', b'disable', default=False,
)
coreconfigitem(
    b'progress', b'estimateinterval', default=60.0,
)
coreconfigitem(
    b'progress',
    b'format',
    default=lambda: [b'topic', b'bar', b'number', b'estimate'],
)
coreconfigitem(
    b'progress', b'refresh', default=0.1,
)
coreconfigitem(
    b'progress', b'width', default=dynamicdefault,
)
coreconfigitem(
    b'push', b'pushvars.server', default=False,
)
coreconfigitem(
    b'rewrite',
    b'backup-bundle',
    default=True,
    alias=[(b'ui', b'history-editing-backup')],
)
coreconfigitem(
    b'rewrite', b'update-timestamp', default=False,
)
coreconfigitem(
    b'storage', b'new-repo-backend', default=b'revlogv1', experimental=True,
)
coreconfigitem(
    b'storage',
    b'revlog.optimize-delta-parent-choice',
    default=True,
    alias=[(b'format', b'aggressivemergedeltas')],
)
coreconfigitem(
    b'storage', b'revlog.reuse-external-delta', default=True,
)
coreconfigitem(
    b'storage', b'revlog.reuse-external-delta-parent', default=None,
)
coreconfigitem(
    b'storage', b'revlog.zlib.level', default=None,
)
coreconfigitem(
    b'storage', b'revlog.zstd.level', default=None,
1100 b'storage', b'revlog.zstd.level', default=None,
1102 )
1101 )
1103 coreconfigitem(
1102 coreconfigitem(
1104 b'server', b'bookmarks-pushkey-compat', default=True,
1103 b'server', b'bookmarks-pushkey-compat', default=True,
1105 )
1104 )
1106 coreconfigitem(
1105 coreconfigitem(
1107 b'server', b'bundle1', default=True,
1106 b'server', b'bundle1', default=True,
1108 )
1107 )
1109 coreconfigitem(
1108 coreconfigitem(
1110 b'server', b'bundle1gd', default=None,
1109 b'server', b'bundle1gd', default=None,
1111 )
1110 )
1112 coreconfigitem(
1111 coreconfigitem(
1113 b'server', b'bundle1.pull', default=None,
1112 b'server', b'bundle1.pull', default=None,
1114 )
1113 )
1115 coreconfigitem(
1114 coreconfigitem(
1116 b'server', b'bundle1gd.pull', default=None,
1115 b'server', b'bundle1gd.pull', default=None,
1117 )
1116 )
1118 coreconfigitem(
1117 coreconfigitem(
1119 b'server', b'bundle1.push', default=None,
1118 b'server', b'bundle1.push', default=None,
1120 )
1119 )
1121 coreconfigitem(
1120 coreconfigitem(
1122 b'server', b'bundle1gd.push', default=None,
1121 b'server', b'bundle1gd.push', default=None,
1123 )
1122 )
1124 coreconfigitem(
1123 coreconfigitem(
1125 b'server',
1124 b'server',
1126 b'bundle2.stream',
1125 b'bundle2.stream',
1127 default=True,
1126 default=True,
1128 alias=[(b'experimental', b'bundle2.stream')],
1127 alias=[(b'experimental', b'bundle2.stream')],
1129 )
1128 )
1130 coreconfigitem(
1129 coreconfigitem(
1131 b'server', b'compressionengines', default=list,
1130 b'server', b'compressionengines', default=list,
1132 )
1131 )
1133 coreconfigitem(
1132 coreconfigitem(
1134 b'server', b'concurrent-push-mode', default=b'check-related',
1133 b'server', b'concurrent-push-mode', default=b'check-related',
1135 )
1134 )
1136 coreconfigitem(
1135 coreconfigitem(
1137 b'server', b'disablefullbundle', default=False,
1136 b'server', b'disablefullbundle', default=False,
1138 )
1137 )
1139 coreconfigitem(
1138 coreconfigitem(
1140 b'server', b'maxhttpheaderlen', default=1024,
1139 b'server', b'maxhttpheaderlen', default=1024,
1141 )
1140 )
1142 coreconfigitem(
1141 coreconfigitem(
1143 b'server', b'pullbundle', default=False,
1142 b'server', b'pullbundle', default=False,
1144 )
1143 )
1145 coreconfigitem(
1144 coreconfigitem(
1146 b'server', b'preferuncompressed', default=False,
1145 b'server', b'preferuncompressed', default=False,
1147 )
1146 )
1148 coreconfigitem(
1147 coreconfigitem(
1149 b'server', b'streamunbundle', default=False,
1148 b'server', b'streamunbundle', default=False,
1150 )
1149 )
1151 coreconfigitem(
1150 coreconfigitem(
1152 b'server', b'uncompressed', default=True,
1151 b'server', b'uncompressed', default=True,
1153 )
1152 )
1154 coreconfigitem(
1153 coreconfigitem(
1155 b'server', b'uncompressedallowsecret', default=False,
1154 b'server', b'uncompressedallowsecret', default=False,
1156 )
1155 )
1157 coreconfigitem(
1156 coreconfigitem(
1158 b'server', b'view', default=b'served',
1157 b'server', b'view', default=b'served',
1159 )
1158 )
1160 coreconfigitem(
1159 coreconfigitem(
1161 b'server', b'validate', default=False,
1160 b'server', b'validate', default=False,
1162 )
1161 )
1163 coreconfigitem(
1162 coreconfigitem(
1164 b'server', b'zliblevel', default=-1,
1163 b'server', b'zliblevel', default=-1,
1165 )
1164 )
1166 coreconfigitem(
1165 coreconfigitem(
1167 b'server', b'zstdlevel', default=3,
1166 b'server', b'zstdlevel', default=3,
1168 )
1167 )
1169 coreconfigitem(
1168 coreconfigitem(
1170 b'share', b'pool', default=None,
1169 b'share', b'pool', default=None,
1171 )
1170 )
1172 coreconfigitem(
1171 coreconfigitem(
1173 b'share', b'poolnaming', default=b'identity',
1172 b'share', b'poolnaming', default=b'identity',
1174 )
1173 )
1175 coreconfigitem(
1174 coreconfigitem(
1176 b'shelve', b'maxbackups', default=10,
1175 b'shelve', b'maxbackups', default=10,
1177 )
1176 )
1178 coreconfigitem(
1177 coreconfigitem(
1179 b'smtp', b'host', default=None,
1178 b'smtp', b'host', default=None,
1180 )
1179 )
1181 coreconfigitem(
1180 coreconfigitem(
1182 b'smtp', b'local_hostname', default=None,
1181 b'smtp', b'local_hostname', default=None,
1183 )
1182 )
1184 coreconfigitem(
1183 coreconfigitem(
1185 b'smtp', b'password', default=None,
1184 b'smtp', b'password', default=None,
1186 )
1185 )
1187 coreconfigitem(
1186 coreconfigitem(
1188 b'smtp', b'port', default=dynamicdefault,
1187 b'smtp', b'port', default=dynamicdefault,
1189 )
1188 )
1190 coreconfigitem(
1189 coreconfigitem(
1191 b'smtp', b'tls', default=b'none',
1190 b'smtp', b'tls', default=b'none',
1192 )
1191 )
1193 coreconfigitem(
1192 coreconfigitem(
1194 b'smtp', b'username', default=None,
1193 b'smtp', b'username', default=None,
1195 )
1194 )
1196 coreconfigitem(
1195 coreconfigitem(
1197 b'sparse', b'missingwarning', default=True, experimental=True,
1196 b'sparse', b'missingwarning', default=True, experimental=True,
1198 )
1197 )
1199 coreconfigitem(
1198 coreconfigitem(
1200 b'subrepos',
1199 b'subrepos',
1201 b'allowed',
1200 b'allowed',
1202 default=dynamicdefault, # to make backporting simpler
1201 default=dynamicdefault, # to make backporting simpler
1203 )
1202 )
1204 coreconfigitem(
1203 coreconfigitem(
1205 b'subrepos', b'hg:allowed', default=dynamicdefault,
1204 b'subrepos', b'hg:allowed', default=dynamicdefault,
1206 )
1205 )
1207 coreconfigitem(
1206 coreconfigitem(
1208 b'subrepos', b'git:allowed', default=dynamicdefault,
1207 b'subrepos', b'git:allowed', default=dynamicdefault,
1209 )
1208 )
1210 coreconfigitem(
1209 coreconfigitem(
1211 b'subrepos', b'svn:allowed', default=dynamicdefault,
1210 b'subrepos', b'svn:allowed', default=dynamicdefault,
1212 )
1211 )
1213 coreconfigitem(
1212 coreconfigitem(
1214 b'templates', b'.*', default=None, generic=True,
1213 b'templates', b'.*', default=None, generic=True,
1215 )
1214 )
1216 coreconfigitem(
1215 coreconfigitem(
1217 b'templateconfig', b'.*', default=dynamicdefault, generic=True,
1216 b'templateconfig', b'.*', default=dynamicdefault, generic=True,
1218 )
1217 )
1219 coreconfigitem(
1218 coreconfigitem(
1220 b'trusted', b'groups', default=list,
1219 b'trusted', b'groups', default=list,
1221 )
1220 )
1222 coreconfigitem(
1221 coreconfigitem(
1223 b'trusted', b'users', default=list,
1222 b'trusted', b'users', default=list,
1224 )
1223 )
1225 coreconfigitem(
1224 coreconfigitem(
1226 b'ui', b'_usedassubrepo', default=False,
1225 b'ui', b'_usedassubrepo', default=False,
1227 )
1226 )
1228 coreconfigitem(
1227 coreconfigitem(
1229 b'ui', b'allowemptycommit', default=False,
1228 b'ui', b'allowemptycommit', default=False,
1230 )
1229 )
1231 coreconfigitem(
1230 coreconfigitem(
1232 b'ui', b'archivemeta', default=True,
1231 b'ui', b'archivemeta', default=True,
1233 )
1232 )
1234 coreconfigitem(
1233 coreconfigitem(
1235 b'ui', b'askusername', default=False,
1234 b'ui', b'askusername', default=False,
1236 )
1235 )
1237 coreconfigitem(
1236 coreconfigitem(
1238 b'ui', b'clonebundlefallback', default=False,
1237 b'ui', b'clonebundlefallback', default=False,
1239 )
1238 )
1240 coreconfigitem(
1239 coreconfigitem(
1241 b'ui', b'clonebundleprefers', default=list,
1240 b'ui', b'clonebundleprefers', default=list,
1242 )
1241 )
1243 coreconfigitem(
1242 coreconfigitem(
1244 b'ui', b'clonebundles', default=True,
1243 b'ui', b'clonebundles', default=True,
1245 )
1244 )
1246 coreconfigitem(
1245 coreconfigitem(
1247 b'ui', b'color', default=b'auto',
1246 b'ui', b'color', default=b'auto',
1248 )
1247 )
1249 coreconfigitem(
1248 coreconfigitem(
1250 b'ui', b'commitsubrepos', default=False,
1249 b'ui', b'commitsubrepos', default=False,
1251 )
1250 )
1252 coreconfigitem(
1251 coreconfigitem(
1253 b'ui', b'debug', default=False,
1252 b'ui', b'debug', default=False,
1254 )
1253 )
1255 coreconfigitem(
1254 coreconfigitem(
1256 b'ui', b'debugger', default=None,
1255 b'ui', b'debugger', default=None,
1257 )
1256 )
1258 coreconfigitem(
1257 coreconfigitem(
1259 b'ui', b'editor', default=dynamicdefault,
1258 b'ui', b'editor', default=dynamicdefault,
1260 )
1259 )
1261 coreconfigitem(
1260 coreconfigitem(
1262 b'ui', b'fallbackencoding', default=None,
1261 b'ui', b'fallbackencoding', default=None,
1263 )
1262 )
1264 coreconfigitem(
1263 coreconfigitem(
1265 b'ui', b'forcecwd', default=None,
1264 b'ui', b'forcecwd', default=None,
1266 )
1265 )
1267 coreconfigitem(
1266 coreconfigitem(
1268 b'ui', b'forcemerge', default=None,
1267 b'ui', b'forcemerge', default=None,
1269 )
1268 )
1270 coreconfigitem(
1269 coreconfigitem(
1271 b'ui', b'formatdebug', default=False,
1270 b'ui', b'formatdebug', default=False,
1272 )
1271 )
1273 coreconfigitem(
1272 coreconfigitem(
1274 b'ui', b'formatjson', default=False,
1273 b'ui', b'formatjson', default=False,
1275 )
1274 )
1276 coreconfigitem(
1275 coreconfigitem(
1277 b'ui', b'formatted', default=None,
1276 b'ui', b'formatted', default=None,
1278 )
1277 )
1279 coreconfigitem(
1278 coreconfigitem(
1280 b'ui', b'graphnodetemplate', default=None,
1279 b'ui', b'graphnodetemplate', default=None,
1281 )
1280 )
1282 coreconfigitem(
1281 coreconfigitem(
1283 b'ui', b'interactive', default=None,
1282 b'ui', b'interactive', default=None,
1284 )
1283 )
1285 coreconfigitem(
1284 coreconfigitem(
1286 b'ui', b'interface', default=None,
1285 b'ui', b'interface', default=None,
1287 )
1286 )
1288 coreconfigitem(
1287 coreconfigitem(
1289 b'ui', b'interface.chunkselector', default=None,
1288 b'ui', b'interface.chunkselector', default=None,
1290 )
1289 )
1291 coreconfigitem(
1290 coreconfigitem(
1292 b'ui', b'large-file-limit', default=10000000,
1291 b'ui', b'large-file-limit', default=10000000,
1293 )
1292 )
1294 coreconfigitem(
1293 coreconfigitem(
1295 b'ui', b'logblockedtimes', default=False,
1294 b'ui', b'logblockedtimes', default=False,
1296 )
1295 )
1297 coreconfigitem(
1296 coreconfigitem(
1298 b'ui', b'logtemplate', default=None,
1297 b'ui', b'logtemplate', default=None,
1299 )
1298 )
1300 coreconfigitem(
1299 coreconfigitem(
1301 b'ui', b'merge', default=None,
1300 b'ui', b'merge', default=None,
1302 )
1301 )
1303 coreconfigitem(
1302 coreconfigitem(
1304 b'ui', b'mergemarkers', default=b'basic',
1303 b'ui', b'mergemarkers', default=b'basic',
1305 )
1304 )
1306 coreconfigitem(
1305 coreconfigitem(
1307 b'ui',
1306 b'ui',
1308 b'mergemarkertemplate',
1307 b'mergemarkertemplate',
1309 default=(
1308 default=(
1310 b'{node|short} '
1309 b'{node|short} '
1311 b'{ifeq(tags, "tip", "", '
1310 b'{ifeq(tags, "tip", "", '
1312 b'ifeq(tags, "", "", "{tags} "))}'
1311 b'ifeq(tags, "", "", "{tags} "))}'
1313 b'{if(bookmarks, "{bookmarks} ")}'
1312 b'{if(bookmarks, "{bookmarks} ")}'
1314 b'{ifeq(branch, "default", "", "{branch} ")}'
1313 b'{ifeq(branch, "default", "", "{branch} ")}'
1315 b'- {author|user}: {desc|firstline}'
1314 b'- {author|user}: {desc|firstline}'
1316 ),
1315 ),
1317 )
1316 )
1318 coreconfigitem(
1317 coreconfigitem(
1319 b'ui', b'message-output', default=b'stdio',
1318 b'ui', b'message-output', default=b'stdio',
1320 )
1319 )
1321 coreconfigitem(
1320 coreconfigitem(
1322 b'ui', b'nontty', default=False,
1321 b'ui', b'nontty', default=False,
1323 )
1322 )
1324 coreconfigitem(
1323 coreconfigitem(
1325 b'ui', b'origbackuppath', default=None,
1324 b'ui', b'origbackuppath', default=None,
1326 )
1325 )
1327 coreconfigitem(
1326 coreconfigitem(
1328 b'ui', b'paginate', default=True,
1327 b'ui', b'paginate', default=True,
1329 )
1328 )
1330 coreconfigitem(
1329 coreconfigitem(
1331 b'ui', b'patch', default=None,
1330 b'ui', b'patch', default=None,
1332 )
1331 )
1333 coreconfigitem(
1332 coreconfigitem(
1334 b'ui', b'pre-merge-tool-output-template', default=None,
1333 b'ui', b'pre-merge-tool-output-template', default=None,
1335 )
1334 )
1336 coreconfigitem(
1335 coreconfigitem(
1337 b'ui', b'portablefilenames', default=b'warn',
1336 b'ui', b'portablefilenames', default=b'warn',
1338 )
1337 )
1339 coreconfigitem(
1338 coreconfigitem(
1340 b'ui', b'promptecho', default=False,
1339 b'ui', b'promptecho', default=False,
1341 )
1340 )
1342 coreconfigitem(
1341 coreconfigitem(
1343 b'ui', b'quiet', default=False,
1342 b'ui', b'quiet', default=False,
1344 )
1343 )
1345 coreconfigitem(
1344 coreconfigitem(
1346 b'ui', b'quietbookmarkmove', default=False,
1345 b'ui', b'quietbookmarkmove', default=False,
1347 )
1346 )
1348 coreconfigitem(
1347 coreconfigitem(
1349 b'ui', b'relative-paths', default=b'legacy',
1348 b'ui', b'relative-paths', default=b'legacy',
1350 )
1349 )
1351 coreconfigitem(
1350 coreconfigitem(
1352 b'ui', b'remotecmd', default=b'hg',
1351 b'ui', b'remotecmd', default=b'hg',
1353 )
1352 )
1354 coreconfigitem(
1353 coreconfigitem(
1355 b'ui', b'report_untrusted', default=True,
1354 b'ui', b'report_untrusted', default=True,
1356 )
1355 )
1357 coreconfigitem(
1356 coreconfigitem(
1358 b'ui', b'rollback', default=True,
1357 b'ui', b'rollback', default=True,
1359 )
1358 )
1360 coreconfigitem(
1359 coreconfigitem(
1361 b'ui', b'signal-safe-lock', default=True,
1360 b'ui', b'signal-safe-lock', default=True,
1362 )
1361 )
1363 coreconfigitem(
1362 coreconfigitem(
1364 b'ui', b'slash', default=False,
1363 b'ui', b'slash', default=False,
1365 )
1364 )
1366 coreconfigitem(
1365 coreconfigitem(
1367 b'ui', b'ssh', default=b'ssh',
1366 b'ui', b'ssh', default=b'ssh',
1368 )
1367 )
1369 coreconfigitem(
1368 coreconfigitem(
1370 b'ui', b'ssherrorhint', default=None,
1369 b'ui', b'ssherrorhint', default=None,
1371 )
1370 )
1372 coreconfigitem(
1371 coreconfigitem(
1373 b'ui', b'statuscopies', default=False,
1372 b'ui', b'statuscopies', default=False,
1374 )
1373 )
1375 coreconfigitem(
1374 coreconfigitem(
1376 b'ui', b'strict', default=False,
1375 b'ui', b'strict', default=False,
1377 )
1376 )
1378 coreconfigitem(
1377 coreconfigitem(
1379 b'ui', b'style', default=b'',
1378 b'ui', b'style', default=b'',
1380 )
1379 )
1381 coreconfigitem(
1380 coreconfigitem(
1382 b'ui', b'supportcontact', default=None,
1381 b'ui', b'supportcontact', default=None,
1383 )
1382 )
1384 coreconfigitem(
1383 coreconfigitem(
1385 b'ui', b'textwidth', default=78,
1384 b'ui', b'textwidth', default=78,
1386 )
1385 )
1387 coreconfigitem(
1386 coreconfigitem(
1388 b'ui', b'timeout', default=b'600',
1387 b'ui', b'timeout', default=b'600',
1389 )
1388 )
1390 coreconfigitem(
1389 coreconfigitem(
1391 b'ui', b'timeout.warn', default=0,
1390 b'ui', b'timeout.warn', default=0,
1392 )
1391 )
1393 coreconfigitem(
1392 coreconfigitem(
1394 b'ui', b'traceback', default=False,
1393 b'ui', b'traceback', default=False,
1395 )
1394 )
1396 coreconfigitem(
1395 coreconfigitem(
1397 b'ui', b'tweakdefaults', default=False,
1396 b'ui', b'tweakdefaults', default=False,
1398 )
1397 )
1399 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
1398 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
1400 coreconfigitem(
1399 coreconfigitem(
1401 b'ui', b'verbose', default=False,
1400 b'ui', b'verbose', default=False,
1402 )
1401 )
1403 coreconfigitem(
1402 coreconfigitem(
1404 b'verify', b'skipflags', default=None,
1403 b'verify', b'skipflags', default=None,
1405 )
1404 )
1406 coreconfigitem(
1405 coreconfigitem(
1407 b'web', b'allowbz2', default=False,
1406 b'web', b'allowbz2', default=False,
1408 )
1407 )
1409 coreconfigitem(
1408 coreconfigitem(
1410 b'web', b'allowgz', default=False,
1409 b'web', b'allowgz', default=False,
1411 )
1410 )
1412 coreconfigitem(
1411 coreconfigitem(
1413 b'web', b'allow-pull', alias=[(b'web', b'allowpull')], default=True,
1412 b'web', b'allow-pull', alias=[(b'web', b'allowpull')], default=True,
1414 )
1413 )
1415 coreconfigitem(
1414 coreconfigitem(
1416 b'web', b'allow-push', alias=[(b'web', b'allow_push')], default=list,
1415 b'web', b'allow-push', alias=[(b'web', b'allow_push')], default=list,
1417 )
1416 )
1418 coreconfigitem(
1417 coreconfigitem(
1419 b'web', b'allowzip', default=False,
1418 b'web', b'allowzip', default=False,
1420 )
1419 )
1421 coreconfigitem(
1420 coreconfigitem(
1422 b'web', b'archivesubrepos', default=False,
1421 b'web', b'archivesubrepos', default=False,
1423 )
1422 )
1424 coreconfigitem(
1423 coreconfigitem(
1425 b'web', b'cache', default=True,
1424 b'web', b'cache', default=True,
1426 )
1425 )
1427 coreconfigitem(
1426 coreconfigitem(
1428 b'web', b'comparisoncontext', default=5,
1427 b'web', b'comparisoncontext', default=5,
1429 )
1428 )
1430 coreconfigitem(
1429 coreconfigitem(
1431 b'web', b'contact', default=None,
1430 b'web', b'contact', default=None,
1432 )
1431 )
1433 coreconfigitem(
1432 coreconfigitem(
1434 b'web', b'deny_push', default=list,
1433 b'web', b'deny_push', default=list,
1435 )
1434 )
1436 coreconfigitem(
1435 coreconfigitem(
1437 b'web', b'guessmime', default=False,
1436 b'web', b'guessmime', default=False,
1438 )
1437 )
1439 coreconfigitem(
1438 coreconfigitem(
1440 b'web', b'hidden', default=False,
1439 b'web', b'hidden', default=False,
1441 )
1440 )
1442 coreconfigitem(
1441 coreconfigitem(
1443 b'web', b'labels', default=list,
1442 b'web', b'labels', default=list,
1444 )
1443 )
1445 coreconfigitem(
1444 coreconfigitem(
1446 b'web', b'logoimg', default=b'hglogo.png',
1445 b'web', b'logoimg', default=b'hglogo.png',
1447 )
1446 )
1448 coreconfigitem(
1447 coreconfigitem(
1449 b'web', b'logourl', default=b'https://mercurial-scm.org/',
1448 b'web', b'logourl', default=b'https://mercurial-scm.org/',
1450 )
1449 )
1451 coreconfigitem(
1450 coreconfigitem(
1452 b'web', b'accesslog', default=b'-',
1451 b'web', b'accesslog', default=b'-',
1453 )
1452 )
1454 coreconfigitem(
1453 coreconfigitem(
1455 b'web', b'address', default=b'',
1454 b'web', b'address', default=b'',
1456 )
1455 )
1457 coreconfigitem(
1456 coreconfigitem(
1458 b'web', b'allow-archive', alias=[(b'web', b'allow_archive')], default=list,
1457 b'web', b'allow-archive', alias=[(b'web', b'allow_archive')], default=list,
1459 )
1458 )
1460 coreconfigitem(
1459 coreconfigitem(
1461 b'web', b'allow_read', default=list,
1460 b'web', b'allow_read', default=list,
1462 )
1461 )
1463 coreconfigitem(
1462 coreconfigitem(
1464 b'web', b'baseurl', default=None,
1463 b'web', b'baseurl', default=None,
1465 )
1464 )
1466 coreconfigitem(
1465 coreconfigitem(
1467 b'web', b'cacerts', default=None,
1466 b'web', b'cacerts', default=None,
1468 )
1467 )
1469 coreconfigitem(
1468 coreconfigitem(
1470 b'web', b'certificate', default=None,
1469 b'web', b'certificate', default=None,
1471 )
1470 )
1472 coreconfigitem(
1471 coreconfigitem(
1473 b'web', b'collapse', default=False,
1472 b'web', b'collapse', default=False,
1474 )
1473 )
1475 coreconfigitem(
1474 coreconfigitem(
1476 b'web', b'csp', default=None,
1475 b'web', b'csp', default=None,
1477 )
1476 )
1478 coreconfigitem(
1477 coreconfigitem(
1479 b'web', b'deny_read', default=list,
1478 b'web', b'deny_read', default=list,
1480 )
1479 )
1481 coreconfigitem(
1480 coreconfigitem(
1482 b'web', b'descend', default=True,
1481 b'web', b'descend', default=True,
1483 )
1482 )
1484 coreconfigitem(
1483 coreconfigitem(
1485 b'web', b'description', default=b"",
1484 b'web', b'description', default=b"",
1486 )
1485 )
1487 coreconfigitem(
1486 coreconfigitem(
1488 b'web', b'encoding', default=lambda: encoding.encoding,
1487 b'web', b'encoding', default=lambda: encoding.encoding,
1489 )
1488 )
1490 coreconfigitem(
1489 coreconfigitem(
1491 b'web', b'errorlog', default=b'-',
1490 b'web', b'errorlog', default=b'-',
1492 )
1491 )
1493 coreconfigitem(
1492 coreconfigitem(
1494 b'web', b'ipv6', default=False,
1493 b'web', b'ipv6', default=False,
1495 )
1494 )
1496 coreconfigitem(
1495 coreconfigitem(
1497 b'web', b'maxchanges', default=10,
1496 b'web', b'maxchanges', default=10,
1498 )
1497 )
1499 coreconfigitem(
1498 coreconfigitem(
1500 b'web', b'maxfiles', default=10,
1499 b'web', b'maxfiles', default=10,
1501 )
1500 )
1502 coreconfigitem(
1501 coreconfigitem(
1503 b'web', b'maxshortchanges', default=60,
1502 b'web', b'maxshortchanges', default=60,
1504 )
1503 )
1505 coreconfigitem(
1504 coreconfigitem(
1506 b'web', b'motd', default=b'',
1505 b'web', b'motd', default=b'',
1507 )
1506 )
1508 coreconfigitem(
1507 coreconfigitem(
1509 b'web', b'name', default=dynamicdefault,
1508 b'web', b'name', default=dynamicdefault,
1510 )
1509 )
1511 coreconfigitem(
1510 coreconfigitem(
1512 b'web', b'port', default=8000,
1511 b'web', b'port', default=8000,
1513 )
1512 )
1514 coreconfigitem(
1513 coreconfigitem(
1515 b'web', b'prefix', default=b'',
1514 b'web', b'prefix', default=b'',
1516 )
1515 )
1517 coreconfigitem(
1516 coreconfigitem(
1518 b'web', b'push_ssl', default=True,
1517 b'web', b'push_ssl', default=True,
1519 )
1518 )
1520 coreconfigitem(
1519 coreconfigitem(
1521 b'web', b'refreshinterval', default=20,
1520 b'web', b'refreshinterval', default=20,
1522 )
1521 )
1523 coreconfigitem(
1522 coreconfigitem(
1524 b'web', b'server-header', default=None,
1523 b'web', b'server-header', default=None,
1525 )
1524 )
1526 coreconfigitem(
1525 coreconfigitem(
1527 b'web', b'static', default=None,
1526 b'web', b'static', default=None,
1528 )
1527 )
1529 coreconfigitem(
1528 coreconfigitem(
1530 b'web', b'staticurl', default=None,
1529 b'web', b'staticurl', default=None,
1531 )
1530 )
1532 coreconfigitem(
1531 coreconfigitem(
1533 b'web', b'stripes', default=1,
1532 b'web', b'stripes', default=1,
1534 )
1533 )
1535 coreconfigitem(
1534 coreconfigitem(
1536 b'web', b'style', default=b'paper',
1535 b'web', b'style', default=b'paper',
1537 )
1536 )
1538 coreconfigitem(
1537 coreconfigitem(
1539 b'web', b'templates', default=None,
1538 b'web', b'templates', default=None,
1540 )
1539 )
1541 coreconfigitem(
1540 coreconfigitem(
1542 b'web', b'view', default=b'served', experimental=True,
1541 b'web', b'view', default=b'served', experimental=True,
1543 )
1542 )
1544 coreconfigitem(
1543 coreconfigitem(
1545 b'worker', b'backgroundclose', default=dynamicdefault,
1544 b'worker', b'backgroundclose', default=dynamicdefault,
1546 )
1545 )
1547 # Windows defaults to a limit of 512 open files. A buffer of 128
1546 # Windows defaults to a limit of 512 open files. A buffer of 128
1548 # should give us enough headway.
1547 # should give us enough headway.
1549 coreconfigitem(
1548 coreconfigitem(
1550 b'worker', b'backgroundclosemaxqueue', default=384,
1549 b'worker', b'backgroundclosemaxqueue', default=384,
1551 )
1550 )
1552 coreconfigitem(
1551 coreconfigitem(
1553 b'worker', b'backgroundcloseminfilecount', default=2048,
1552 b'worker', b'backgroundcloseminfilecount', default=2048,
1554 )
1553 )
1555 coreconfigitem(
1554 coreconfigitem(
1556 b'worker', b'backgroundclosethreadcount', default=4,
1555 b'worker', b'backgroundclosethreadcount', default=4,
1557 )
1556 )
1558 coreconfigitem(
1557 coreconfigitem(
1559 b'worker', b'enabled', default=True,
1558 b'worker', b'enabled', default=True,
1560 )
1559 )
1561 coreconfigitem(
1560 coreconfigitem(
1562 b'worker', b'numcpus', default=None,
1561 b'worker', b'numcpus', default=None,
1563 )
1562 )
1564
1563
# Rebase related configuration moved to core because other extensions are
# doing strange things. For example, shelve imports the extension to reuse
# some bits without formally loading it.
coreconfigitem(
    b'commands', b'rebase.requiredest', default=False,
)
coreconfigitem(
    b'experimental', b'rebaseskipobsolete', default=True,
)
coreconfigitem(
    b'rebase', b'singletransaction', default=False,
)
coreconfigitem(
    b'rebase', b'experimental.inmemory', default=False,
)
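
# Editor's illustration, not part of configitems.py: the declarations above
# only register defaults; they are consumed through the regular ``ui``
# accessors, which fall back to the registered value when no config file
# sets one. A minimal sketch:
#
#   from mercurial import ui as uimod
#
#   u = uimod.ui.load()
#   u.configint(b'web', b'port')         # -> 8000 (declared above)
#   u.configbool(b'phases', b'publish')  # -> True
#   u.config(b'patch', b'eol')           # -> b'strict'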
@@ -1,3058 +1,3065
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

from __future__ import absolute_import

import collections
import contextlib
import errno
import io
import os
import struct
import zlib

# import stuff from node for others to import from revlog
from .node import (
    bin,
    hex,
    nullhex,
    nullid,
    nullrev,
    short,
    wdirfilenodeids,
    wdirhex,
    wdirid,
    wdirrev,
)
from .i18n import _
from .pycompat import getattr
from .revlogutils.constants import (
    FLAG_GENERALDELTA,
    FLAG_INLINE_DATA,
    REVLOGV0,
    REVLOGV1,
    REVLOGV1_FLAGS,
    REVLOGV2,
    REVLOGV2_FLAGS,
    REVLOG_DEFAULT_FLAGS,
    REVLOG_DEFAULT_FORMAT,
    REVLOG_DEFAULT_VERSION,
)
from .revlogutils.flagutil import (
    REVIDX_DEFAULT_FLAGS,
    REVIDX_ELLIPSIS,
    REVIDX_EXTSTORED,
    REVIDX_FLAGS_ORDER,
    REVIDX_ISCENSORED,
    REVIDX_RAWTEXT_CHANGING_FLAGS,
    REVIDX_SIDEDATA,
)
from .thirdparty import attr
from . import (
    ancestor,
    dagop,
    error,
    mdiff,
    policy,
    pycompat,
    templatefilters,
    util,
)
from .interfaces import (
    repository,
    util as interfaceutil,
)
from .revlogutils import (
    deltas as deltautil,
    flagutil,
    nodemap as nodemaputil,
    sidedata as sidedatautil,
)
from .utils import (
    storageutil,
    stringutil,
)

# blanked usage of all the names to prevent pyflakes constraints
# We need these names available in the module for extensions.
REVLOGV0
REVLOGV1
REVLOGV2
FLAG_INLINE_DATA
FLAG_GENERALDELTA
REVLOG_DEFAULT_FLAGS
REVLOG_DEFAULT_FORMAT
REVLOG_DEFAULT_VERSION
REVLOGV1_FLAGS
REVLOGV2_FLAGS
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
REVIDX_SIDEDATA
REVIDX_EXTSTORED
REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
REVIDX_RAWTEXT_CHANGING_FLAGS

parsers = policy.importmod('parsers')
rustancestor = policy.importrust('ancestor')
rustdagop = policy.importrust('dagop')
rustrevlog = policy.importrust('revlog')
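
# Editorial note, not in the upstream file: policy.importmod() prefers the
# compiled C module when available and falls back to the pure Python
# implementation; policy.importrust() instead returns None when the Rust
# extensions are not built, so Rust-backed code paths must be guarded by
# availability checks before use.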

# Aliased for performance.
_zlibdecompress = zlib.decompress

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

# Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
    return text, False, {}


def ellipsiswriteprocessor(rl, text, sidedata):
    return text, False


def ellipsisrawprocessor(rl, text):
    return False


ellipsisprocessor = (
    ellipsisreadprocessor,
    ellipsiswriteprocessor,
    ellipsisrawprocessor,
)


def getoffset(q):
    return int(q >> 16)


def gettype(q):
    return int(q & 0xFFFF)


def offset_type(offset, type):
    if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
        raise ValueError(b'unknown revlog index flags')
    return int(int(offset) << 16 | type)


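# Worked example added by the editor (not upstream): the first index field
# packs the data offset and the flag bits into a single integer, so the
# helpers above round-trip:
#
#   q = offset_type(4096, 0)
#   assert q == 4096 << 16
#   assert getoffset(q) == 4096 and gettype(q) == 0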
def _verify_revision(rl, skipflags, state, node):
    """Verify the integrity of the given revlog ``node`` while providing a hook
    point for extensions to influence the operation."""
    if skipflags:
        state[b'skipread'].add(node)
    else:
        # Side-effect: read content and verify hash.
        rl.revision(node)


@attr.s(slots=True, frozen=True)
class _revisioninfo(object):
    """Information about a revision that allows building its fulltext
    node: expected hash of the revision
    p1, p2: parent revs of the revision
    btext: built text cache consisting of a one-element list
    cachedelta: (baserev, uncompressed_delta) or None
    flags: flags associated to the revision storage

    One of btext[0] or cachedelta must be set.
    """

    node = attr.ib()
    p1 = attr.ib()
    p2 = attr.ib()
    btext = attr.ib()
    textlen = attr.ib()
    cachedelta = attr.ib()
    flags = attr.ib()


@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class revlogrevisiondelta(object):
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    linknode = attr.ib(default=None)


@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class revlogproblem(object):
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)


# index v0:
#  4 bytes: offset
#  4 bytes: compressed length
#  4 bytes: base rev
#  4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
indexformatv0 = struct.Struct(b">4l20s20s20s")
indexformatv0_pack = indexformatv0.pack
indexformatv0_unpack = indexformatv0.unpack


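# Size note added by the editor (not upstream): ">4l20s20s20s" is four
# 4-byte integers plus three 20-byte hashes, so indexformatv0.size == 76
# and revision r of a v0 index starts at byte 76 * r.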
class revlogoldindex(list):
    @property
    def nodemap(self):
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        nodemap = nodemaputil.NodeMap({nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exists in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def append(self, tup):
        self._nodemap[tup[7]] = len(self)
        super(revlogoldindex, self).append(tup)

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        for r in pycompat.xrange(i.start, len(self)):
            del self._nodemap[self[r][7]]
        super(revlogoldindex, self).__delitem__(i)

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __getitem__(self, i):
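        # Editorial comment (logic unchanged): rev -1 is the null revision;
        # it is answered with a synthetic all-zero entry ending in nullid
        # instead of being looked up in the list itself.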
        if i == -1:
            return (0, 0, 0, -1, -1, -1, -1, nullid)
        return list.__getitem__(self, i)


class revlogoldio(object):
    def __init__(self):
        self.size = indexformatv0.size

    def parseindex(self, data, inline):
        s = self.size
        index = []
        nodemap = nodemaputil.NodeMap({nullid: nullrev})
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off : off + s]
            off += s
            e = indexformatv0_unpack(cur)
            # transform to revlogv1 format
            e2 = (
                offset_type(e[0], 0),
                e[1],
                -1,
                e[2],
                e[3],
                nodemap.get(e[4], nullrev),
                nodemap.get(e[5], nullrev),
                e[6],
            )
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        index = revlogoldindex(index)
        return index, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise error.RevlogError(
                _(b'index entry flags need revlog version 1')
            )
        e2 = (
            getoffset(entry[0]),
            entry[1],
            entry[3],
            entry[4],
            node(entry[5]),
            node(entry[6]),
            entry[7],
        )
        return indexformatv0_pack(*e2)


# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = struct.Struct(b">Qiiiiii20s12x")
indexformatng_pack = indexformatng.pack
versionformat = struct.Struct(b">I")
versionformat_pack = versionformat.pack
versionformat_unpack = versionformat.unpack

# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7FFFFFFF


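# Worked example added by the editor (not upstream): a v1 ("ng") index
# entry is a fixed 64 bytes (8 + 6 * 4 + 20 + 12 bytes of padding), so a
# non-inline index stores revision r at byte offset 64 * r:
#
#   assert indexformatng.size == 64
#   entry = (offset_type(0, 0), 11, 12, 0, 0, -1, -1, b'\x00' * 20)
#   assert len(indexformatng_pack(*entry)) == 64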
class revlogio(object):
    def __init__(self):
        self.size = indexformatng.size

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, cache

    def packentry(self, entry, node, version, rev):
        p = indexformatng_pack(*entry)
        if rev == 0:
            p = versionformat_pack(version) + p[4:]
        return p


NodemapRevlogIO = None

if util.safehasattr(parsers, 'parse_index_devel_nodemap'):

    class NodemapRevlogIO(revlogio):
        """A debug oriented IO class that returns a PersistentNodeMapIndexObject

        The PersistentNodeMapIndexObject object is meant to test the
        persistent nodemap feature.
        """

        def parseindex(self, data, inline):
            index, cache = parsers.parse_index_devel_nodemap(data, inline)
            return index, cache


class rustrevlogio(revlogio):
    def parseindex(self, data, inline):
        index, cache = super(rustrevlogio, self).parseindex(data, inline)
        return rustrevlog.MixedIndex(index), cache


class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.

    If checkambig, indexfile is opened with checkambig=True at
    writing, to avoid file stat ambiguity.

    If mmaplargeindex is True, and an mmapindexthreshold is set, the
    index will be mmapped rather than read if it is larger than the
    configured threshold.

    If censorable is True, the revlog can have censored revisions.

    If `upperboundcomp` is not None, this is the expected maximal gain from
    compression for the data content.
    """

    _flagserrorclass = error.RevlogError

    def __init__(
        self,
        opener,
        indexfile,
        datafile=None,
        checkambig=False,
        mmaplargeindex=False,
        censorable=False,
        upperboundcomp=None,
        persistentnodemap=False,
    ):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.

        """
        self.upperboundcomp = upperboundcomp
        self.indexfile = indexfile
        self.datafile = datafile or (indexfile[:-2] + b".d")
        self.nodemap_file = None
        if persistentnodemap:
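            # Editorial comment (logic unchanged): a transaction publishes
            # pending data to hooks through ".a" (appended) files such as
            # "00changelog.i.a"; the persistent nodemap follows suit, so a
            # pending index is paired with a ".n.a" docket when one exists.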
            if indexfile.endswith(b'.a'):
                pending_path = indexfile[:-4] + b".n.a"
                if opener.exists(pending_path):
                    self.nodemap_file = pending_path
                else:
                    self.nodemap_file = indexfile[:-4] + b".n"
            else:
                self.nodemap_file = indexfile[:-2] + b".n"
440
447
441 self.opener = opener
448 self.opener = opener
442 # When True, indexfile is opened with checkambig=True at writing, to
449 # When True, indexfile is opened with checkambig=True at writing, to
443 # avoid file stat ambiguity.
450 # avoid file stat ambiguity.
444 self._checkambig = checkambig
451 self._checkambig = checkambig
445 self._mmaplargeindex = mmaplargeindex
452 self._mmaplargeindex = mmaplargeindex
446 self._censorable = censorable
453 self._censorable = censorable
447 # 3-tuple of (node, rev, text) for a raw revision.
454 # 3-tuple of (node, rev, text) for a raw revision.
448 self._revisioncache = None
455 self._revisioncache = None
449 # Maps rev to chain base rev.
456 # Maps rev to chain base rev.
450 self._chainbasecache = util.lrucachedict(100)
457 self._chainbasecache = util.lrucachedict(100)
451 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
458 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
452 self._chunkcache = (0, b'')
459 self._chunkcache = (0, b'')
453 # How much data to read and cache into the raw revlog data cache.
460 # How much data to read and cache into the raw revlog data cache.
454 self._chunkcachesize = 65536
461 self._chunkcachesize = 65536
455 self._maxchainlen = None
462 self._maxchainlen = None
456 self._deltabothparents = True
463 self._deltabothparents = True
457 self.index = None
464 self.index = None
458 self._nodemap_docket = None
465 self._nodemap_docket = None
459 # Mapping of partial identifiers to full nodes.
466 # Mapping of partial identifiers to full nodes.
460 self._pcache = {}
467 self._pcache = {}
461 # Mapping of revision integer to full node.
468 # Mapping of revision integer to full node.
462 self._compengine = b'zlib'
469 self._compengine = b'zlib'
463 self._compengineopts = {}
470 self._compengineopts = {}
464 self._maxdeltachainspan = -1
471 self._maxdeltachainspan = -1
465 self._withsparseread = False
472 self._withsparseread = False
466 self._sparserevlog = False
473 self._sparserevlog = False
467 self._srdensitythreshold = 0.50
474 self._srdensitythreshold = 0.50
468 self._srmingapsize = 262144
475 self._srmingapsize = 262144
469
476
470 # Make copy of flag processors so each revlog instance can support
477 # Make copy of flag processors so each revlog instance can support
471 # custom flags.
478 # custom flags.
        self._flagprocessors = dict(flagutil.flagprocessors)

        # 2-tuple of file handles being used for active writing.
        self._writinghandles = None

        self._loadindex()

    def _loadindex(self):
        mmapindexthreshold = None
        opts = self.opener.options

        if b'revlogv2' in opts:
            newversionflags = REVLOGV2 | FLAG_INLINE_DATA
        elif b'revlogv1' in opts:
            newversionflags = REVLOGV1 | FLAG_INLINE_DATA
            if b'generaldelta' in opts:
                newversionflags |= FLAG_GENERALDELTA
        elif b'revlogv0' in self.opener.options:
            newversionflags = REVLOGV0
        else:
            newversionflags = REVLOG_DEFAULT_VERSION

        if b'chunkcachesize' in opts:
            self._chunkcachesize = opts[b'chunkcachesize']
        if b'maxchainlen' in opts:
            self._maxchainlen = opts[b'maxchainlen']
        if b'deltabothparents' in opts:
            self._deltabothparents = opts[b'deltabothparents']
        self._lazydelta = bool(opts.get(b'lazydelta', True))
        self._lazydeltabase = False
        if self._lazydelta:
            self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
        if b'compengine' in opts:
            self._compengine = opts[b'compengine']
        if b'zlib.level' in opts:
            self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
        if b'zstd.level' in opts:
            self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
        if b'maxdeltachainspan' in opts:
            self._maxdeltachainspan = opts[b'maxdeltachainspan']
        if self._mmaplargeindex and b'mmapindexthreshold' in opts:
            mmapindexthreshold = opts[b'mmapindexthreshold']
        self.hassidedata = bool(opts.get(b'side-data', False))
        if self.hassidedata:
            self._flagprocessors[REVIDX_SIDEDATA] = sidedatautil.processors
        self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
        withsparseread = bool(opts.get(b'with-sparse-read', False))
        # sparse-revlog forces sparse-read
        self._withsparseread = self._sparserevlog or withsparseread
        if b'sparse-read-density-threshold' in opts:
            self._srdensitythreshold = opts[b'sparse-read-density-threshold']
        if b'sparse-read-min-gap-size' in opts:
            self._srmingapsize = opts[b'sparse-read-min-gap-size']
        if opts.get(b'enableellipsis'):
            self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor

        # revlog v0 doesn't have flag processors
        for flag, processor in pycompat.iteritems(
            opts.get(b'flagprocessors', {})
        ):
            flagutil.insertflagprocessor(flag, processor, self._flagprocessors)

        if self._chunkcachesize <= 0:
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not greater than 0')
                % self._chunkcachesize
            )
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not a power of 2')
                % self._chunkcachesize
            )

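        # Illustration of the power-of-two test above: ``x & (x - 1)`` clears
        # the lowest set bit, so it is zero exactly when ``x`` is a power of
        # two. With hypothetical sizes:
        #   65536 & 65535 == 0      -> accepted
        #   65537 & 65536 == 65536  -> rejected
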
        indexdata = b''
        self._initempty = True
        try:
            with self._indexfp() as f:
                if (
                    mmapindexthreshold is not None
                    and self.opener.fstat(f).st_size >= mmapindexthreshold
                ):
                    # TODO: should .close() to release resources without
                    # relying on Python GC
                    indexdata = util.buffer(util.mmapread(f))
                else:
                    indexdata = f.read()
            if len(indexdata) > 0:
                versionflags = versionformat_unpack(indexdata[:4])[0]
                self._initempty = False
            else:
                versionflags = newversionflags
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

            versionflags = newversionflags

        self.version = versionflags

        flags = versionflags & ~0xFFFF
        fmt = versionflags & 0xFFFF

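        # Decoding sketch: the first 4 bytes of the index pack the revlog
        # format in the low 16 bits and feature flags in the high 16 bits.
        # For example, a header of 0x00010001 splits into fmt == REVLOGV1
        # with FLAG_INLINE_DATA set.
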
        if fmt == REVLOGV0:
            if flags:
                raise error.RevlogError(
                    _(b'unknown flags (%#04x) in version %d revlog %s')
                    % (flags >> 16, fmt, self.indexfile)
                )

            self._inline = False
            self._generaldelta = False

        elif fmt == REVLOGV1:
            if flags & ~REVLOGV1_FLAGS:
                raise error.RevlogError(
                    _(b'unknown flags (%#04x) in version %d revlog %s')
                    % (flags >> 16, fmt, self.indexfile)
                )

            self._inline = versionflags & FLAG_INLINE_DATA
            self._generaldelta = versionflags & FLAG_GENERALDELTA

        elif fmt == REVLOGV2:
            if flags & ~REVLOGV2_FLAGS:
                raise error.RevlogError(
                    _(b'unknown flags (%#04x) in version %d revlog %s')
                    % (flags >> 16, fmt, self.indexfile)
                )

            self._inline = versionflags & FLAG_INLINE_DATA
            # generaldelta implied by version 2 revlogs.
            self._generaldelta = True

        else:
            raise error.RevlogError(
                _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
            )
        # sparse-revlog can't be on without general-delta (issue6056)
        if not self._generaldelta:
            self._sparserevlog = False

        self._storedeltachains = True

        devel_nodemap = (
            self.nodemap_file
            and opts.get(b'devel-force-nodemap', False)
            and NodemapRevlogIO is not None
        )

        use_rust_index = False
        if rustrevlog is not None:
            if self.nodemap_file is not None:
                use_rust_index = True
            else:
                use_rust_index = self.opener.options.get(b'rust.index')

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        elif devel_nodemap:
            self._io = NodemapRevlogIO()
        elif use_rust_index:
            self._io = rustrevlogio()
        try:
            d = self._io.parseindex(indexdata, self._inline)
            index, _chunkcache = d
            use_nodemap = (
                not self._inline
                and self.nodemap_file is not None
                and util.safehasattr(index, 'update_nodemap_data')
            )
            if use_nodemap:
                nodemap_data = nodemaputil.persisted_data(self)
                if nodemap_data is not None:
                    docket = nodemap_data[0]
                    if d[0][docket.tip_rev][7] == docket.tip_node:
                        # no changelog tampering
                        self._nodemap_docket = docket
                        index.update_nodemap_data(*nodemap_data)
        except (ValueError, IndexError):
            raise error.RevlogError(
                _(b"index %s is corrupted") % self.indexfile
            )
        self.index, self._chunkcache = d
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = {}
        # revlog header -> revlog compressor
        self._decompressors = {}

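    # Note on the docket check in _loadindex above: index entry field 7 is
    # the node hash, so comparing the persisted nodemap docket's recorded
    # (tip_rev, tip_node) pair against the freshly parsed index detects a
    # stripped or rewritten changelog before stale nodemap data is reused.
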
    @util.propertycache
    def _compressor(self):
        engine = util.compengines[self._compengine]
        return engine.revlogcompressor(self._compengineopts)

    def _indexfp(self, mode=b'r'):
        """file object for the revlog's index file"""
        args = {'mode': mode}
        if mode != b'r':
            args['checkambig'] = self._checkambig
        if mode == b'w':
            args['atomictemp'] = True
        return self.opener(self.indexfile, **args)

    def _datafp(self, mode=b'r'):
        """file object for the revlog's data file"""
        return self.opener(self.datafile, mode=mode)

    @contextlib.contextmanager
    def _datareadfp(self, existingfp=None):
        """file object suitable to read data"""
        # Use explicit file handle, if given.
        if existingfp is not None:
            yield existingfp

        # Use a file handle being actively used for writes, if available.
        # There is some danger to doing this because reads will seek the
        # file. However, _writeentry() performs a SEEK_END before all writes,
        # so we should be safe.
        elif self._writinghandles:
            if self._inline:
                yield self._writinghandles[0]
            else:
                yield self._writinghandles[1]

        # Otherwise open a new file handle.
        else:
            if self._inline:
                func = self._indexfp
            else:
                func = self._datafp
            with func() as fp:
                yield fp

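    # Usage sketch for _datareadfp (hypothetical caller, non-inline revlog):
    # one handle can be reused across many chunk reads instead of reopening
    # the data file each time:
    #
    #   with self._datareadfp() as fp:
    #       fp.seek(self.start(rev))
    #       chunk = fp.read(self.length(rev))
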
    def tiprev(self):
        return len(self.index) - 1

    def tip(self):
        return self.node(self.tiprev())

    def __contains__(self, rev):
        return 0 <= rev < len(self)

    def __len__(self):
        return len(self.index)

    def __iter__(self):
        return iter(pycompat.xrange(len(self)))

    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        return storageutil.iterrevs(len(self), start=start, stop=stop)

    @property
    def nodemap(self):
        msg = (
            b"revlog.nodemap is deprecated, "
            b"use revlog.index.[has_node|rev|get_rev]"
        )
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self.index.nodemap

    @property
    def _nodecache(self):
        msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self.index.nodemap

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def candelta(self, baserev, rev):
        """whether two revisions (baserev, rev) can be delta-ed or not"""
        # Disable delta if either rev requires a content-changing flag
        # processor (ex. LFS). This is because such a flag processor can
        # alter the rawtext content that the delta will be based on, and two
        # clients could have the same revlog node with different flags (i.e.
        # different rawtext contents) and the delta could be incompatible.
        if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
            self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
        ):
            return False
        return True

    def update_caches(self, transaction):
        if self.nodemap_file is not None:
            if transaction is None:
                nodemaputil.update_persistent_nodemap(self)
            else:
                nodemaputil.setup_persistent_nodemap(transaction, self)

    def clearcaches(self):
        self._revisioncache = None
        self._chainbasecache.clear()
        self._chunkcache = (0, b'')
        self._pcache = {}
        self._nodemap_docket = None
        self.index.clearcaches()
        # The Python code is responsible for validating the docket, so we
        # end up having to refresh it here.
        use_nodemap = (
            not self._inline
            and self.nodemap_file is not None
            and util.safehasattr(self.index, 'update_nodemap_data')
        )
        if use_nodemap:
            nodemap_data = nodemaputil.persisted_data(self)
            if nodemap_data is not None:
                self._nodemap_docket = nodemap_data[0]
                self.index.update_nodemap_data(*nodemap_data)

    def rev(self, node):
        try:
            return self.index.rev(node)
        except TypeError:
            raise
        except error.RevlogError:
            # parsers.c radix tree lookup failed
            if node == wdirid or node in wdirfilenodeids:
                raise error.WdirUnsupported
            raise error.LookupError(node, self.indexfile, _(b'no node'))

    # Accessors for index entries.

    # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
    # are flags.
    def start(self, rev):
        return int(self.index[rev][0] >> 16)

    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF

    def length(self, rev):
        return self.index[rev][1]

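    # Packing sketch: index[rev][0] stores ``offset << 16 | flags``. With a
    # hypothetical offset of 1234 and no flags, the entry is 1234 << 16;
    # start() then recovers 1234 and flags() recovers 0.
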
    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.rawdata(rev)
        return len(t)

    def size(self, rev):
        """length of non-raw text (processed by a "read" flag processor)"""
        # fast path: if no "read" flag processor could change the content,
        # size is rawsize. note: ELLIPSIS is known to not change the content.
        flags = self.flags(rev)
        if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
            return self.rawsize(rev)

        return len(self.revision(rev, raw=False))

    def chainbase(self, rev):
        base = self._chainbasecache.get(rev)
        if base is not None:
            return base

        index = self.index
        iterrev = rev
        base = index[iterrev][3]
        while base != iterrev:
            iterrev = base
            base = index[iterrev][3]

        self._chainbasecache[rev] = base
        return base

    def linkrev(self, rev):
        return self.index[rev][4]

    def parentrevs(self, rev):
        try:
            entry = self.index[rev]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

        return entry[5], entry[6]

    # fast parentrevs(rev) where rev isn't filtered
    _uncheckedparentrevs = parentrevs

    def node(self, rev):
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    # Derived from index values.

    def end(self, rev):
        return self.start(rev) + self.length(rev)

    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7]  # map revisions to nodes inline

    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r

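    # Worked illustration of _chaininfo (hypothetical sizes): for a
    # generaldelta chain base -> r1 -> r2 with compressed delta sizes 10 and
    # 7 on top of a 100-byte compressed base, _chaininfo(r2) returns
    # (2, 117): two deltas in the chain, and 7 + 10 + 100 bytes to process.
    # The ``while ... else`` only adds the base's own length when the walk
    # reaches the chain base without hitting a cached entry.
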
    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped

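    # Usage sketch (hypothetical revs): for a generaldelta chain
    # base -> r1 -> r2, _deltachain(r2) returns ([base, r1, r2], False),
    # while _deltachain(r2, stoprev=r1) returns ([r2], True) because the
    # walk stops as soon as ``stoprev`` is reached.
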
    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse revision order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        # first, make sure start revisions aren't filtered
        revs = list(revs)
        checkrev = self.node
        for r in revs:
            checkrev(r)
        # and we're sure ancestors aren't filtered as well

        if rustancestor is not None:
            lazyancestors = rustancestor.LazyAncestors
            arg = self.index
        else:
            lazyancestors = ancestor.lazyancestors
            arg = self._uncheckedparentrevs
        return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)

    def descendants(self, revs):
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if r not in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]

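    # Revset illustration (hypothetical linear history 0-1-2-3): with
    # common = [node(1)] and heads = [node(3)], findcommonmissing returns
    # (::1, nodes of revs 2 and 3), the missing part sorted by revision
    # number and therefore topologically.
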
    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        if rustancestor is not None:
            return rustancestor.MissingAncestors(self.index, common)
        return ancestor.incrementalmissingancestors(self.parentrevs, common)

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

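    # Usage sketch (hypothetical rev numbers): the incremental object lets a
    # caller feed head sets as they arrive instead of recomputing from
    # scratch:
    #
    #   inc = self.incrementalmissingrevs(common=[c1, c2])
    #   missing = inc.missingancestors([h1, h2])
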
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses the list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid]  # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n)  # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update(
                            [p for p in self.parents(n) if p != nullid]
                        )
                    elif n in heads:  # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents is a
                # descendant. (We seeded the descendants set with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in pycompat.iteritems(heads) if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

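    # Example (hypothetical linear history 0-1-2-3):
    # nodesbetween([node(1)], [node(2)]) returns
    # ([node(1), node(2)], [node(1)], [node(2)]): the topological path,
    # the reachable roots, and the reachable heads.
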
    def headrevs(self, revs=None):
        if revs is None:
            try:
                return self.index.headrevs()
            except AttributeError:
                return self._headrevs()
        if rustdagop is not None:
            return rustdagop.headrevs(self.index, revs)
        return dagop.headrevs(revs, self._uncheckedparentrevs)

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iter over filtered revs so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1  # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0  # my parents are not
        return [r for r, val in enumerate(ishead) if val]

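    # Note on _headrevs above: ishead gets count + 1 slots so that a nullrev
    # (-1) parent harmlessly clears the extra last slot through Python's
    # negative indexing instead of clobbering a real revision's flag.
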
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullrev
        else:
            start = self.rev(start)

        stoprevs = {self.rev(n) for n in stop or []}

        revs = dagop.headrevssubset(
            self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
        )

        return [self.node(rev) for rev in revs]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        ancs = self._commonancestorsheads(a, b)
        return pycompat.maplist(self.node, ancs)

    def _commonancestorsheads(self, *revs):
        """calculate all the heads of the common ancestors of revs"""
        try:
            ancs = self.index.commonancestorsheads(*revs)
        except (AttributeError, OverflowError):  # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
        return ancs

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        A revision is considered an ancestor of itself."""
        a, b = self.rev(a), self.rev(b)
        return self.isancestorrev(a, b)

    def isancestorrev(self, a, b):
        """return True if revision a is an ancestor of revision b

        A revision is considered an ancestor of itself.

        The implementation of this is trivial but the use of
        reachableroots is not."""
        if a == nullrev:
            return True
        elif a == b:
            return True
        elif a > b:
            return False
        return bool(self.reachableroots(a, [b], [a], includepath=False))

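    # The early exits in isancestorrev lean on revlog numbering being
    # topological: a revision's number is always greater than its parents',
    # so a > b already rules out "a is an ancestor of b" without calling
    # reachableroots.
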
    def reachableroots(self, minroot, heads, roots, includepath=False):
        """return (heads(::(<roots> and <roots>::<heads>)))

        If includepath is True, return (<roots>::<heads>)."""
        try:
            return self.index.reachableroots2(
                minroot, heads, roots, includepath
            )
        except AttributeError:
            return dagop._reachablerootspure(
                self.parentrevs, minroot, roots, heads, includepath
            )

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except error.LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if b"%d" % rev != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, error.LookupError):
                pass

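    # Resolution order of _match(), sketched with hypothetical inputs:
    #   _match(5)           -> self.node(5)             (integer rev)
    #   _match(<20 bytes>)  -> the node itself, if present in the index
    #   _match(b'42')       -> self.node(42)            (str of a rev)
    #   _match(b'-1')       -> self.node(len(self) - 1) (counts from tip)
    #   _match(<40 hex>)    -> bin(id), if present in the index
    # anything else falls through and returns None.
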
    def _partialmatch(self, id):
        # we don't care about wdirfilenodeids here as they should always
        # be full hashes
        maybewdir = wdirhex.startswith(id)
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise error.RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise error.AmbiguousPrefixLookupError(
                    id, self.indexfile, _(b'ambiguous identifier')
                )
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[: l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [
                    n for n in nl if hex(n).startswith(id) and self.hasnode(n)
                ]
                if nullhex.startswith(id):
                    nl.append(nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.indexfile, _(b'ambiguous identifier')
                    )
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
        - revision number or str(revision number)
        - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise error.LookupError(id, self.indexfile, _(b'no match found'))

    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""

        def isvalid(prefix):
            try:
                matchednode = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if matchednode is None:
                raise error.LookupError(node, self.indexfile, _(b'no node'))
            return True

        def maybewdir(prefix):
            return all(c == b'f' for c in pycompat.iterbytestr(prefix))

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != wdirid:
                    raise error.LookupError(node, self.indexfile, _(b'no node'))
            except AttributeError:
                # Fall through to pure code
                pass

        if node == wdirid:
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        for length in range(minlength, 41):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)

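    # Example (hypothetical hashes): if the only two nodes start with
    # d4e1... and d4f2..., shortest() on the first returns b'd4e', the
    # first prefix that _partialmatch() resolves unambiguously. All-'f'
    # prefixes are special: they could also name the working-directory
    # pseudo-node, so disambiguate() extends them until a non-'f'
    # character appears.
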
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return storageutil.hashrevisionsha1(text, p1, p2) != node

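    # cmp() works because the stored node commits to both parents and the
    # text: sha1(min(p1, p2) + max(p1, p2) + text). A minimal sketch of
    # the same check, assuming plain hashlib:
    #
    #   import hashlib
    #   def hashrevision(text, p1, p2):
    #       s = hashlib.sha1(min(p1, p2) + max(p1, p2))
    #       s.update(text)
    #       return s.digest()
    #
    # so no decompression of the stored revision is needed to detect a
    # changed text.
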
    def _cachesegment(self, offset, data):
        """Add a segment to the revlog cache.

        Accepts an absolute offset and the data that is at that location.
        """
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.

        Raises if the requested number of bytes could not be read.
        """
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (
            (offset + length + cachesize) & ~(cachesize - 1)
        ) - realoffset
        with self._datareadfp(df) as df:
            df.seek(realoffset)
            d = df.read(reallength)

        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            startoffset = offset - realoffset
            if len(d) - startoffset < length:
                raise error.RevlogError(
                    _(
                        b'partial read of revlog %s; expected %d bytes from '
                        b'offset %d, got %d'
                    )
                    % (
                        self.indexfile if self._inline else self.datafile,
                        length,
                        realoffset,
                        len(d) - startoffset,
                    )
                )

            return util.buffer(d, startoffset, length)

        if len(d) < length:
            raise error.RevlogError(
                _(
                    b'partial read of revlog %s; expected %d bytes from offset '
                    b'%d, got %d'
                )
                % (
                    self.indexfile if self._inline else self.datafile,
                    length,
                    offset,
                    len(d),
                )
            )

        return d

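    # Worked example of the window arithmetic above (assuming the default
    # 64KiB chunk cache size): for offset=70000, length=100,
    #   realoffset = 70000 & ~65535                           = 65536
    #   reallength = ((70000 + 100 + 65536) & ~65535) - 65536 = 65536
    # so one aligned 64KiB read serves the 100-byte request and warms the
    # cache on both sides of it.
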
    def _getsegment(self, offset, length, df=None):
        """Obtain a segment of raw data from the revlog.

        Accepts an absolute offset, length of bytes to obtain, and an
        optional file handle to the already-opened revlog. If the file
        handle is used, its original seek position will not be preserved.

        Requests for data may be returned from a cache.

        Returns a str or a buffer instance of raw byte data.
        """
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._readsegment(offset, length, df=df)

    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is used, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start

        return start, self._getsegment(start, length, df=df)

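    # Inline offset correction, illustrated (hypothetical sizes): in an
    # inline revlog the index entries (self._io.size bytes each) are
    # interleaved with the data, [entry0][data0][entry1][data1]..., so
    # the data of revision 0 recorded at offset 0 really starts
    # (0 + 1) * self._io.size bytes into the index file.
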
    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])

    def _chunks(self, revs, df=None, targetsize=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        if not self._withsparseread:
            slicedchunks = (revs,)
        else:
            slicedchunks = deltautil.slicechunk(
                self, revs, targetsize=targetsize
            )

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            for rev in revschunk:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

        return l

    def _chunkclear(self):
        """Clear the raw chunk cache."""
        self._chunkcache = (0, b'')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

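    # Example (hypothetical revs): with generaldelta the base stored in
    # entry[3] may be any earlier revision, e.g. deltaparent(7) == 3;
    # without it the delta is always against the previous revision, so
    # deltaparent(7) == 6. A base equal to the revision itself marks a
    # full snapshot, reported as nullrev.
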
    def issnapshot(self, rev):
        """tells whether rev is a snapshot"""
        if not self._sparserevlog:
            return self.deltaparent(rev) == nullrev
        elif util.safehasattr(self.index, b'issnapshot'):
            # directly assign the method to cache the testing and access
            self.issnapshot = self.index.issnapshot
            return self.issnapshot(rev)
        if rev == nullrev:
            return True
        entry = self.index[rev]
        base = entry[3]
        if base == rev:
            return True
        if base == nullrev:
            return True
        p1 = entry[5]
        p2 = entry[6]
        if base == p1 or base == p2:
            return False
        return self.issnapshot(base)

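    # Sparse-revlog snapshots, sketched (hypothetical revs): if rev 8
    # deltas against rev 4, which is not one of its parents, rev 8 is an
    # intermediate snapshot provided rev 4 is itself a snapshot (e.g. its
    # base is nullrev); if rev 8 deltas against its parent rev 7, it is a
    # regular delta and issnapshot(8) is False.
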
    def snapshotdepth(self, rev):
        """number of snapshots in the chain before this one"""
        if not self.issnapshot(rev):
            raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
        return len(self._deltachain(rev)[0]) - 1

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._chunk(rev2))

        return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))

    def _processflags(self, text, flags, operation, raw=False):
        """deprecated entry point to access flag processors"""
        msg = b'_processflag(...) use the specialized variant'
        util.nouideprecwarn(msg, b'5.2', stacklevel=2)
        if raw:
            return text, flagutil.processflagsraw(self, text, flags)
        elif operation == b'read':
            return flagutil.processflagsread(self, text, flags)
        else: # write operation
            return flagutil.processflagswrite(self, text, flags)

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        raw - an optional argument specifying if the revision data is to be
        treated as raw data when applying flag transforms. 'raw' should be set
        to True when generating changegroups or in debug commands.
        """
        if raw:
            msg = (
                b'revlog.revision(..., raw=True) is deprecated, '
                b'use revlog.rawdata(...)'
            )
            util.nouideprecwarn(msg, b'5.2', stacklevel=2)
        return self._revisiondata(nodeorrev, _df, raw=raw)[0]

    def sidedata(self, nodeorrev, _df=None):
        """a map of extra data related to the changeset but not part of the hash

        This function currently returns a dictionary. However, a more advanced
        mapping object will likely be used in the future for more
        efficient/lazy code.
        """
        return self._revisiondata(nodeorrev, _df)[1]
1837
1844
1838 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1845 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1839 # deal with <nodeorrev> argument type
1846 # deal with <nodeorrev> argument type
1840 if isinstance(nodeorrev, int):
1847 if isinstance(nodeorrev, int):
1841 rev = nodeorrev
1848 rev = nodeorrev
1842 node = self.node(rev)
1849 node = self.node(rev)
1843 else:
1850 else:
1844 node = nodeorrev
1851 node = nodeorrev
1845 rev = None
1852 rev = None
1846
1853
1847 # fast path the special `nullid` rev
1854 # fast path the special `nullid` rev
1848 if node == nullid:
1855 if node == nullid:
1849 return b"", {}
1856 return b"", {}
1850
1857
1851 # ``rawtext`` is the text as stored inside the revlog. Might be the
1858 # ``rawtext`` is the text as stored inside the revlog. Might be the
1852 # revision or might need to be processed to retrieve the revision.
1859 # revision or might need to be processed to retrieve the revision.
1853 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1860 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1854
1861
1855 if raw and validated:
1862 if raw and validated:
1856 # if we don't want to process the raw text and that raw
1863 # if we don't want to process the raw text and that raw
1857 # text is cached, we can exit early.
1864 # text is cached, we can exit early.
1858 return rawtext, {}
1865 return rawtext, {}
1859 if rev is None:
1866 if rev is None:
1860 rev = self.rev(node)
1867 rev = self.rev(node)
1861 # the revlog's flag for this revision
1868 # the revlog's flag for this revision
1862 # (usually alter its state or content)
1869 # (usually alter its state or content)
1863 flags = self.flags(rev)
1870 flags = self.flags(rev)
1864
1871
1865 if validated and flags == REVIDX_DEFAULT_FLAGS:
1872 if validated and flags == REVIDX_DEFAULT_FLAGS:
1866 # no extra flags set, no flag processor runs, text = rawtext
1873 # no extra flags set, no flag processor runs, text = rawtext
1867 return rawtext, {}
1874 return rawtext, {}
1868
1875
1869 sidedata = {}
1876 sidedata = {}
1870 if raw:
1877 if raw:
1871 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1878 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1872 text = rawtext
1879 text = rawtext
1873 else:
1880 else:
1874 try:
1881 try:
1875 r = flagutil.processflagsread(self, rawtext, flags)
1882 r = flagutil.processflagsread(self, rawtext, flags)
1876 except error.SidedataHashError as exc:
1883 except error.SidedataHashError as exc:
1877 msg = _(b"integrity check failed on %s:%s sidedata key %d")
1884 msg = _(b"integrity check failed on %s:%s sidedata key %d")
1878 msg %= (self.indexfile, pycompat.bytestr(rev), exc.sidedatakey)
1885 msg %= (self.indexfile, pycompat.bytestr(rev), exc.sidedatakey)
1879 raise error.RevlogError(msg)
1886 raise error.RevlogError(msg)
1880 text, validatehash, sidedata = r
1887 text, validatehash, sidedata = r
1881 if validatehash:
1888 if validatehash:
1882 self.checkhash(text, node, rev=rev)
1889 self.checkhash(text, node, rev=rev)
1883 if not validated:
1890 if not validated:
1884 self._revisioncache = (node, rev, rawtext)
1891 self._revisioncache = (node, rev, rawtext)
1885
1892
1886 return text, sidedata
1893 return text, sidedata
1887
1894
1888 def _rawtext(self, node, rev, _df=None):
1895 def _rawtext(self, node, rev, _df=None):
1889 """return the possibly unvalidated rawtext for a revision
1896 """return the possibly unvalidated rawtext for a revision
1890
1897
1891 returns (rev, rawtext, validated)
1898 returns (rev, rawtext, validated)
1892 """
1899 """
1893
1900
1894 # revision in the cache (could be useful to apply delta)
1901 # revision in the cache (could be useful to apply delta)
1895 cachedrev = None
1902 cachedrev = None
1896 # An intermediate text to apply deltas to
1903 # An intermediate text to apply deltas to
1897 basetext = None
1904 basetext = None
1898
1905
1899 # Check if we have the entry in cache
1906 # Check if we have the entry in cache
1900 # The cache entry looks like (node, rev, rawtext)
1907 # The cache entry looks like (node, rev, rawtext)
1901 if self._revisioncache:
1908 if self._revisioncache:
1902 if self._revisioncache[0] == node:
1909 if self._revisioncache[0] == node:
1903 return (rev, self._revisioncache[2], True)
1910 return (rev, self._revisioncache[2], True)
1904 cachedrev = self._revisioncache[1]
1911 cachedrev = self._revisioncache[1]
1905
1912
1906 if rev is None:
1913 if rev is None:
1907 rev = self.rev(node)
1914 rev = self.rev(node)
1908
1915
1909 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1916 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1910 if stopped:
1917 if stopped:
1911 basetext = self._revisioncache[2]
1918 basetext = self._revisioncache[2]
1912
1919
1913 # drop cache to save memory, the caller is expected to
1920 # drop cache to save memory, the caller is expected to
1914 # update self._revisioncache after validating the text
1921 # update self._revisioncache after validating the text
1915 self._revisioncache = None
1922 self._revisioncache = None
1916
1923
1917 targetsize = None
1924 targetsize = None
1918 rawsize = self.index[rev][2]
1925 rawsize = self.index[rev][2]
1919 if 0 <= rawsize:
1926 if 0 <= rawsize:
1920 targetsize = 4 * rawsize
1927 targetsize = 4 * rawsize
1921
1928
1922 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1929 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1923 if basetext is None:
1930 if basetext is None:
1924 basetext = bytes(bins[0])
1931 basetext = bytes(bins[0])
1925 bins = bins[1:]
1932 bins = bins[1:]
1926
1933
1927 rawtext = mdiff.patches(basetext, bins)
1934 rawtext = mdiff.patches(basetext, bins)
1928 del basetext # let us have a chance to free memory early
1935 del basetext # let us have a chance to free memory early
1929 return (rev, rawtext, False)
1936 return (rev, rawtext, False)
1930
1937
1931 def rawdata(self, nodeorrev, _df=None):
1938 def rawdata(self, nodeorrev, _df=None):
1932 """return an uncompressed raw data of a given node or revision number.
1939 """return an uncompressed raw data of a given node or revision number.
1933
1940
1934 _df - an existing file handle to read from. (internal-only)
1941 _df - an existing file handle to read from. (internal-only)
1935 """
1942 """
1936 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1943 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1937
1944
1938 def hash(self, text, p1, p2):
1945 def hash(self, text, p1, p2):
1939 """Compute a node hash.
1946 """Compute a node hash.
1940
1947
1941 Available as a function so that subclasses can replace the hash
1948 Available as a function so that subclasses can replace the hash
1942 as needed.
1949 as needed.
1943 """
1950 """
1944 return storageutil.hashrevisionsha1(text, p1, p2)
1951 return storageutil.hashrevisionsha1(text, p1, p2)
1945
1952
1946 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1953 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1947 """Check node hash integrity.
1954 """Check node hash integrity.
1948
1955
1949 Available as a function so that subclasses can extend hash mismatch
1956 Available as a function so that subclasses can extend hash mismatch
1950 behaviors as needed.
1957 behaviors as needed.
1951 """
1958 """
1952 try:
1959 try:
1953 if p1 is None and p2 is None:
1960 if p1 is None and p2 is None:
1954 p1, p2 = self.parents(node)
1961 p1, p2 = self.parents(node)
1955 if node != self.hash(text, p1, p2):
1962 if node != self.hash(text, p1, p2):
1956 # Clear the revision cache on hash failure. The revision cache
1963 # Clear the revision cache on hash failure. The revision cache
1957 # only stores the raw revision and clearing the cache does have
1964 # only stores the raw revision and clearing the cache does have
1958 # the side-effect that we won't have a cache hit when the raw
1965 # the side-effect that we won't have a cache hit when the raw
1959 # revision data is accessed. But this case should be rare and
1966 # revision data is accessed. But this case should be rare and
1960 # it is extra work to teach the cache about the hash
1967 # it is extra work to teach the cache about the hash
1961 # verification state.
1968 # verification state.
1962 if self._revisioncache and self._revisioncache[0] == node:
1969 if self._revisioncache and self._revisioncache[0] == node:
1963 self._revisioncache = None
1970 self._revisioncache = None
1964
1971
1965 revornode = rev
1972 revornode = rev
1966 if revornode is None:
1973 if revornode is None:
1967 revornode = templatefilters.short(hex(node))
1974 revornode = templatefilters.short(hex(node))
1968 raise error.RevlogError(
1975 raise error.RevlogError(
1969 _(b"integrity check failed on %s:%s")
1976 _(b"integrity check failed on %s:%s")
1970 % (self.indexfile, pycompat.bytestr(revornode))
1977 % (self.indexfile, pycompat.bytestr(revornode))
1971 )
1978 )
1972 except error.RevlogError:
1979 except error.RevlogError:
1973 if self._censorable and storageutil.iscensoredtext(text):
1980 if self._censorable and storageutil.iscensoredtext(text):
1974 raise error.CensoredNodeError(self.indexfile, node, text)
1981 raise error.CensoredNodeError(self.indexfile, node, text)
1975 raise
1982 raise
1976
1983
    def _enforceinlinesize(self, tr, fp=None):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        if (
            not self._inline
            or (self.start(tiprev) + self.length(tiprev)) < _maxinline
        ):
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise error.RevlogError(
                _(b"%s not found in the transaction") % self.indexfile
            )

        trindex = trinfo[2]
        if trindex is not None:
            dataoff = self.start(trindex)
        else:
            # revlog was stripped at start of transaction, use all leftover data
            trindex = len(self) - 1
            dataoff = self.end(tiprev)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()
            # We can't use the cached file handle after close(). So prevent
            # its usage.
            self._writinghandles = None

        with self._indexfp(b'r') as ifh, self._datafp(b'w') as dfh:
            for r in self:
                dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])

        with self._indexfp(b'w') as fp:
            self.version &= ~FLAG_INLINE_DATA
            self._inline = False
            io = self._io
            for i in self:
                e = io.packentry(self.index[i], self.node, self.version, i)
                fp.write(e)

            # the temp file replaces the real index when we exit the context
            # manager

        tr.replace(self.indexfile, trindex * self._io.size)
        nodemaputil.setup_persistent_nodemap(tr, self)
        self._chunkclear()

    def _nodeduplicatecallback(self, transaction, node):
        """called when trying to add a node already stored."""

    def addrevision(
        self,
        text,
        transaction,
        link,
        p1,
        p2,
        cachedelta=None,
        node=None,
        flags=REVIDX_DEFAULT_FLAGS,
        deltacomputer=None,
        sidedata=None,
    ):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
            computed by default as hash(text, p1, p2), however subclasses might
            use a different hashing method (and override checkhash() in that
            case)
        flags - the known flags to set on the revision
        deltacomputer - an optional deltacomputer instance shared between
            multiple calls
        """
        if link == nullrev:
            raise error.RevlogError(
                _(b"attempted to add linkrev -1 to %s") % self.indexfile
            )

        if sidedata is None:
            sidedata = {}
            flags = flags & ~REVIDX_SIDEDATA
        elif not self.hassidedata:
            raise error.ProgrammingError(
                _(b"trying to add sidedata to a revlog that does not support them")
            )
        else:
            flags |= REVIDX_SIDEDATA

        if flags:
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = flagutil.processflagswrite(
            self, text, flags, sidedata=sidedata
        )

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise error.RevlogError(
                _(
                    b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
                )
                % (self.indexfile, len(rawtext))
            )

        node = node or self.hash(rawtext, p1, p2)
        if self.index.has_node(node):
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(
            rawtext,
            transaction,
            link,
            p1,
            p2,
            node,
            flags,
            cachedelta=cachedelta,
            deltacomputer=deltacomputer,
        )

    def addrawrevision(
        self,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        deltacomputer=None,
    ):
        """add a raw revision with known flags, node and parents
        useful when reusing a revision not stored in this revlog (e.g. received
        over the wire, or read from an external bundle).
        """
        dfh = None
        if not self._inline:
            dfh = self._datafp(b"a+")
        ifh = self._indexfp(b"a+")
        try:
            return self._addrevision(
                node,
                rawtext,
                transaction,
                link,
                p1,
                p2,
                flags,
                cachedelta,
                ifh,
                dfh,
                deltacomputer=deltacomputer,
            )
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def compress(self, data):
        """Generate a possibly-compressed representation of data."""
        if not data:
            return b'', data

        compressed = self._compressor.compress(data)

        if compressed:
            # The revlog compressor added the header in the returned data.
            return b'', compressed

        if data[0:1] == b'\0':
            return b'', data
        return b'u', data

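    # The (header, data) contract of compress(), following the code
    # above: an empty header means the payload already identifies itself
    # (engine output such as zlib's, or raw data starting with b'\0'),
    # while b'u' marks data stored uncompressed. For instance,
    # incompressible text comes back as (b'u', text), and text that
    # already starts with b'\0' comes back unchanged as (b'', text).
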
    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely be at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        if t == b'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise error.RevlogError(
                    _(b'revlog decompress error: %s')
                    % stringutil.forcebytestr(e)
                )
        # '\0' is more common than 'u' so it goes first.
        elif t == b'\0':
            return data
        elif t == b'u':
            return util.buffer(data, 1)

        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor(self._compengineopts)
                self._decompressors[t] = compressor
            except KeyError:
                raise error.RevlogError(_(b'unknown compression type %r') % t)

        return compressor.decompress(data)

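    # Header byte dispatch in decompress(), summarized:
    #   b'x'  -> zlib, the historical default (checked first)
    #   b'\0' -> literal chunk, returned as-is
    #   b'u'  -> stored uncompressed, header stripped
    #   other -> resolved through util.compengines.forrevlogheader(),
    #            e.g. the zstd engine registers its own header byte.
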
    def _addrevision(
        self,
        node,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        flags,
        cachedelta,
        ifh,
        dfh,
        alwayscache=False,
        deltacomputer=None,
    ):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.

        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

        if "deltacomputer" is not provided or None, a defaultdeltacomputer will
        be used.

        invariants:
        - rawtext is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        if node == nullid:
            raise error.RevlogError(
                _(b"%s: attempt to add null revision") % self.indexfile
            )
        if node == wdirid or node in wdirfilenodeids:
            raise error.RevlogError(
                _(b"%s: attempt to add wdir revision") % self.indexfile
            )

        if self._inline:
            fh = ifh
        else:
            fh = dfh

        btext = [rawtext]

        curr = len(self)
        prev = curr - 1
        offset = self.end(prev)
        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if rawtext is None:
            # need rawtext size, before changed by flag processors, which is
            # the non-raw size. use revlog explicitly to avoid filelog's extra
            # logic that might remove metadata size.
            textlen = mdiff.patchedsize(
                revlog.size(self, cachedelta[0]), cachedelta[1]
            )
        else:
            textlen = len(rawtext)

        if deltacomputer is None:
            deltacomputer = deltautil.deltacomputer(self)

        revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)

        deltainfo = deltacomputer.finddeltainfo(revinfo, fh)

        e = (
            offset_type(offset, flags),
            deltainfo.deltalen,
            textlen,
            deltainfo.base,
            link,
            p1r,
            p2r,
            node,
        )
        self.index.append(e)

        entry = self._io.packentry(e, self.node, self.version, curr)
        self._writeentry(
            transaction, ifh, dfh, entry, deltainfo.data, link, offset
        )

        rawtext = btext[0]

        if alwayscache and rawtext is None:
            rawtext = deltacomputer.buildtext(revinfo, fh)

        if type(rawtext) == bytes: # only accept immutable objects
            self._revisioncache = (node, curr, rawtext)
        self._chainbasecache[curr] = deltainfo.chainbase
        return node

    def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        ifh.seek(0, os.SEEK_END)
        if dfh:
            dfh.seek(0, os.SEEK_END)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self._enforceinlinesize(transaction, ifh)
        nodemaputil.setup_persistent_nodemap(transaction, self)

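    # A minimal, self-contained sketch (not part of revlog) of the a+ mode
    # pitfall worked around above; the file name and payload are hypothetical:
    #
    #     import os
    #
    #     with open('example.dat', 'ab+') as fh:
    #         fh.seek(0)
    #         header = fh.read(4)      # the handle was just used for reading,
    #         fh.seek(0, os.SEEK_END)  # so reposition before writing,
    #         fh.write(b'record')      # otherwise some platforms misplace it
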
    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """

        if self._writinghandles:
            raise error.ProgrammingError(b'cannot nest addgroup() calls')

        nodes = []

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self._indexfp(b"a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self._datafp(b"a+")

        def flush():
            if dfh:
                dfh.flush()
            ifh.flush()

        self._writinghandles = (ifh, dfh)

        try:
            deltacomputer = deltautil.deltacomputer(self)
            # loop through our set of deltas
            for data in deltas:
                node, p1, p2, linknode, deltabase, delta, flags = data
                link = linkmapper(linknode)
                flags = flags or REVIDX_DEFAULT_FLAGS

                nodes.append(node)

                if self.index.has_node(node):
                    self._nodeduplicatecallback(transaction, node)
                    # this can happen if two branches make the same change
                    continue

                for p in (p1, p2):
                    if not self.index.has_node(p):
                        raise error.LookupError(
                            p, self.indexfile, _(b'unknown parent')
                        )

                if not self.index.has_node(deltabase):
                    raise error.LookupError(
                        deltabase, self.indexfile, _(b'unknown delta base')
                    )

                baserev = self.rev(deltabase)

                if baserev != nullrev and self.iscensored(baserev):
                    # if base is censored, delta must be full replacement in a
                    # single patch operation
                    hlen = struct.calcsize(b">lll")
                    oldlen = self.rawsize(baserev)
                    newlen = len(delta) - hlen
                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                        raise error.CensoredBaseError(
                            self.indexfile, self.node(baserev)
                        )

                if not flags and self._peek_iscensored(baserev, delta, flush):
                    flags |= REVIDX_ISCENSORED

                # We assume consumers of addrevisioncb will want to retrieve
                # the added revision, which will require a call to
                # revision(). revision() will fast path if there is a cache
                # hit. So, we tell _addrevision() to always cache in this case.
                # We're only using addgroup() in the context of changegroup
                # generation so the revision data can always be handled as raw
                # by the flagprocessor.
                self._addrevision(
                    node,
                    None,
                    transaction,
                    link,
                    p1,
                    p2,
                    flags,
                    (baserev, delta),
                    ifh,
                    dfh,
                    alwayscache=bool(addrevisioncb),
                    deltacomputer=deltacomputer,
                )

                if addrevisioncb:
                    addrevisioncb(self, node)

                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self._datafp(b"a+")
                    ifh = self._indexfp(b"a+")
                    self._writinghandles = (ifh, dfh)
        finally:
            self._writinghandles = None

            if dfh:
                dfh.close()
            ifh.close()

        return nodes

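    # Illustrative sketch (not part of revlog): the shape of one entry in the
    # ``deltas`` iterable consumed by addgroup() above. Only the field order
    # is taken from the unpacking in the loop; the variable names are
    # hypothetical placeholders.
    #
    #     delta_entry = (
    #         node,       # binary node id of the revision being added
    #         p1, p2,     # binary node ids of its parents
    #         linknode,   # node resolved to a linkrev via ``linkmapper``
    #         deltabase,  # node the ``delta`` must be applied against
    #         delta,      # the binary delta data itself
    #         0,          # flags; a falsy value means REVIDX_DEFAULT_FLAGS
    #     )
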
    def iscensored(self, rev):
        """Check if a file revision is censored."""
        if not self._censorable:
            return False

        return self.flags(rev) & REVIDX_ISCENSORED

    def _peek_iscensored(self, baserev, delta, flush):
        """Quickly check if a delta produces a censored revision."""
        if not self._censorable:
            return False

        return storageutil.deltaiscensored(delta, baserev, self.rawsize)

    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        return storageutil.resolvestripinfo(
            minlink,
            len(self) - 1,
            self.headrevs(),
            self.linkrev,
            self.parentrevs,
        )

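    # Hedged usage sketch (not part of revlog), mirroring how strip() below
    # consumes getstrippoint(); ``rl`` and ``minlink`` are assumed values.
    #
    #     striprev, brokenrevs = rl.getstrippoint(minlink)
    #     if striprev == len(rl):
    #         pass  # nothing needs stripping
    #     elif brokenrevs:
    #         pass  # revs whose linkrevs the strip would invalidate
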
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._revisioncache = None
        self._chaininfocache = {}
        self._chunkclear()

        del self.index[rev:-1]

    def checksize(self):
        """Check size of index and data files

        return a (dd, di) tuple.
        - dd: extra bytes for the "data" file
        - di: extra bytes for the "index" file

        A healthy revlog will return (0, 0).
        """
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, io.SEEK_END)
                actual = f.tell()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, io.SEEK_END)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

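    # Illustrative usage sketch (not part of revlog): interpreting the
    # checksize() result from a verify-like caller; ``rl`` is assumed to be
    # an existing revlog instance.
    #
    #     dd, di = rl.checksize()
    #     if dd:
    #         print('data file has %d extra bytes' % dd)
    #     if di:
    #         print('index file has %d extra bytes' % di)
    #     # (0, 0) means both files match the sizes recorded in the index
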
    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
    ):
        if nodesorder not in (b'nodes', b'storage', b'linear', None):
            raise error.ProgrammingError(
                b'unhandled value for nodesorder: %s' % nodesorder
            )

        if nodesorder is None and not self._generaldelta:
            nodesorder = b'storage'

        if (
            not self._storedeltachains
            and deltamode != repository.CG_DELTAMODE_PREV
        ):
            deltamode = repository.CG_DELTAMODE_FULL

        return storageutil.emitrevisions(
            self,
            nodes,
            nodesorder,
            revlogrevisiondelta,
            deltaparentfn=self.deltaparent,
            candeltafn=self.candelta,
            rawsizefn=self.rawsize,
            revdifffn=self.revdiff,
            flagsfn=self.flags,
            deltamode=deltamode,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
        )

    DELTAREUSEALWAYS = b'always'
    DELTAREUSESAMEREVS = b'samerevs'
    DELTAREUSENEVER = b'never'

    DELTAREUSEFULLADD = b'fulladd'

    DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}

    def clone(
        self,
        tr,
        destrevlog,
        addrevisioncb=None,
        deltareuse=DELTAREUSESAMEREVS,
        forcedeltabothparents=None,
        sidedatacompanion=None,
    ):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
          Deltas will always be reused (if possible), even if the destination
          revlog would not select the same revisions for the delta. This is the
          fastest mode of operation.
        DELTAREUSESAMEREVS
          Deltas will be reused if the destination revlog would pick the same
          revisions for the delta. This mode strikes a balance between speed
          and optimization.
        DELTAREUSENEVER
          Deltas will never be reused. This is the slowest mode of execution.
          This mode can be used to recompute deltas (e.g. if the diff/delta
          algorithm changes).
        DELTAREUSEFULLADD
          Revisions will be re-added as if they were new content. This is
          slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
          e.g. large file detection and handling.

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``forcedeltabothparents``
        argument controls whether to force computing deltas against both
        parents for merges. When unset, the destination revlog's current
        setting is kept.

        If not None, `sidedatacompanion` is a callable that accepts two
        arguments:

            (srcrevlog, rev)

        and returns a triplet that controls changes to sidedata content from
        the old revision to the new clone result:

            (dropall, filterout, update)

        * if `dropall` is True, all sidedata should be dropped
        * `filterout` is a set of sidedata keys that should be dropped
        * `update` is a mapping of additional/new key -> value
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(
                _(b'value for deltareuse invalid: %s') % deltareuse
            )

        if len(destrevlog):
            raise ValueError(_(b'destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_(b'source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_(b'destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase control whether to reuse a cached delta,
        # if possible.
        oldlazydelta = destrevlog._lazydelta
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            self._clone(
                tr,
                destrevlog,
                addrevisioncb,
                deltareuse,
                forcedeltabothparents,
                sidedatacompanion,
            )

        finally:
            destrevlog._lazydelta = oldlazydelta
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd

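    # A minimal sketch of a ``sidedatacompanion`` callable as described in
    # the clone() docstring above. ``HYPOTHETICAL_KEY`` is a placeholder;
    # real sidedata keys are defined elsewhere in Mercurial.
    #
    #     def drop_one_key(srcrevlog, rev):
    #         dropall = False                 # keep sidedata in general,
    #         filterout = {HYPOTHETICAL_KEY}  # but drop this one key,
    #         update = {}                     # and add nothing new
    #         return dropall, filterout, update
    #
    #     srcrevlog.clone(tr, destrevlog, sidedatacompanion=drop_one_key)
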
    def _clone(
        self,
        tr,
        destrevlog,
        addrevisioncb,
        deltareuse,
        forcedeltabothparents,
        sidedatacompanion,
    ):
        """perform the core duty of `revlog.clone` after parameter processing"""
        deltacomputer = deltautil.deltacomputer(destrevlog)
        index = self.index
        for rev in self:
            entry = index[rev]

            # Some classes override linkrev to take filtered revs into
            # account. Use raw entry from index.
            flags = entry[0] & 0xFFFF
            linkrev = entry[4]
            p1 = index[entry[5]][7]
            p2 = index[entry[6]][7]
            node = entry[7]

            sidedataactions = (False, [], {})
            if sidedatacompanion is not None:
                sidedataactions = sidedatacompanion(self, rev)

            # (Possibly) reuse the delta from the revlog if allowed and
            # the revlog chunk is a delta.
            cachedelta = None
            rawtext = None
            if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
                dropall, filterout, update = sidedataactions
                text, sidedata = self._revisiondata(rev)
                if dropall:
                    sidedata = {}
                for key in filterout:
                    sidedata.pop(key, None)
                sidedata.update(update)
                if not sidedata:
                    sidedata = None
                destrevlog.addrevision(
                    text,
                    tr,
                    linkrev,
                    p1,
                    p2,
                    cachedelta=cachedelta,
                    node=node,
                    flags=flags,
                    deltacomputer=deltacomputer,
                    sidedata=sidedata,
                )
            else:
                if destrevlog._lazydelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                if not cachedelta:
                    rawtext = self.rawdata(rev)

                ifh = destrevlog.opener(
                    destrevlog.indexfile, b'a+', checkambig=False
                )
                dfh = None
                if not destrevlog._inline:
                    dfh = destrevlog.opener(destrevlog.datafile, b'a+')
                try:
                    destrevlog._addrevision(
                        node,
                        rawtext,
                        tr,
                        linkrev,
                        p1,
                        p2,
                        flags,
                        cachedelta,
                        ifh,
                        dfh,
                        deltacomputer=deltacomputer,
                    )
                finally:
                    if dfh:
                        dfh.close()
                    ifh.close()

            if addrevisioncb:
                addrevisioncb(self, rev, node)

    def censorrevision(self, tr, censornode, tombstone=b''):
        if (self.version & 0xFFFF) == REVLOGV0:
            raise error.RevlogError(
                _(b'cannot censor with version %d revlogs') % self.version
            )

        censorrev = self.rev(censornode)
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        if len(tombstone) > self.rawsize(censorrev):
            raise error.Abort(
                _(b'censor tombstone must be no longer than censored data')
            )

        # Rewriting the revlog in place is hard. Our strategy for censoring is
        # to create a new revlog, copy all revisions to it, then replace the
        # revlogs on transaction close.

        newindexfile = self.indexfile + b'.tmpcensored'
        newdatafile = self.datafile + b'.tmpcensored'

        # This is a bit dangerous. We could easily have a mismatch of state.
        newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True)
        newrl.version = self.version
        newrl._generaldelta = self._generaldelta
        newrl._io = self._io

        for rev in self.revs():
            node = self.node(rev)
            p1, p2 = self.parents(node)

            if rev == censorrev:
                newrl.addrawrevision(
                    tombstone,
                    tr,
                    self.linkrev(censorrev),
                    p1,
                    p2,
                    censornode,
                    REVIDX_ISCENSORED,
                )

                if newrl.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'censored revision stored as delta; '
                            b'cannot censor'
                        ),
                        hint=_(
                            b'censoring of revlogs is not '
                            b'fully implemented; please report '
                            b'this bug'
                        ),
                    )
                continue

            if self.iscensored(rev):
                if self.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'cannot censor due to censored '
                            b'revision having delta stored'
                        )
                    )
                rawtext = self._chunk(rev)
            else:
                rawtext = self.rawdata(rev)

            newrl.addrawrevision(
                rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
            )

        tr.addbackup(self.indexfile, location=b'store')
        if not self._inline:
            tr.addbackup(self.datafile, location=b'store')

        self.opener.rename(newrl.indexfile, self.indexfile)
        if not self._inline:
            self.opener.rename(newrl.datafile, self.datafile)

        self.clearcaches()
        self._loadindex()

    def verifyintegrity(self, state):
        """Verifies the integrity of the revlog.

        Yields ``revlogproblem`` instances describing problems that are
        found.
        """
        dd, di = self.checksize()
        if dd:
            yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
        if di:
            yield revlogproblem(error=_(b'index contains %d extra bytes') % di)

        version = self.version & 0xFFFF

        # The verifier tells us what version revlog we should be.
        if version != state[b'expectedversion']:
            yield revlogproblem(
                warning=_(b"warning: '%s' uses revlog format %d; expected %d")
                % (self.indexfile, version, state[b'expectedversion'])
            )

        state[b'skipread'] = set()
        state[b'safe_renamed'] = set()

        for rev in self:
            node = self.node(rev)

            # Verify contents. 4 cases to care about:
            #
            #   common: the most common case
            #   rename: with a rename
            #   meta:   file content starts with b'\1\n', the metadata
            #           header defined in filelog.py, but without a rename
            #   ext:    content stored externally
            #
            # More formally, their differences are shown below:
            #
            #                        | common | rename | meta  | ext
            #  ------------------------------------------------------
            #  flags()               | 0      | 0      | 0     | not 0
            #  renamed()             | False  | True   | False | ?
            #  rawtext[0:2]=='\1\n'  | False  | True   | True  | ?
            #
            # "rawtext" means the raw text stored in revlog data, which
            # could be retrieved by "rawdata(rev)". "text" mentioned below
            # is "revision(rev)".
            #
            # There are 3 different lengths stored physically:
            #  1. L1: rawsize, stored in revlog index
            #  2. L2: len(rawtext), stored in revlog data
            #  3. L3: len(text), stored in revlog data if flags==0, or
            #     possibly somewhere else if flags!=0
            #
            # L1 should be equal to L2. L3 could be different from them.
            # "text" may or may not affect commit hash depending on flag
            # processors (see flagutil.addflagprocessor).
            #
            #               | common | rename | meta  | ext
            #  -------------------------------------------------
            #  rawsize()    | L1     | L1     | L1    | L1
            #  size()       | L1     | L2-LM  | L1(*) | L1 (?)
            #  len(rawtext) | L2     | L2     | L2    | L2
            #  len(text)    | L2     | L2     | L2    | L3
            #  len(read())  | L2     | L2-LM  | L2-LM | L3 (?)
            #
            #  LM:  length of metadata, depending on rawtext
            #  (*): not ideal, see comment in filelog.size
            #  (?): could be "- len(meta)" if the resolved content has
            #       rename metadata
            #
            # Checks that need to be done:
            #  1. length check: L1 == L2, in all cases.
            #  2. hash check: depending on flag processor, we may need to
            #     use either "text" (external), or "rawtext" (in revlog).

            try:
                skipflags = state.get(b'skipflags', 0)
                if skipflags:
                    skipflags &= self.flags(rev)

                _verify_revision(self, skipflags, state, node)

                l1 = self.rawsize(rev)
                l2 = len(self.rawdata(node))

                if l1 != l2:
                    yield revlogproblem(
                        error=_(b'unpacked size is %d, %d expected') % (l2, l1),
                        node=node,
                    )

            except error.CensoredNodeError:
                if state[b'erroroncensored']:
                    yield revlogproblem(
                        error=_(b'censored file data'), node=node
                    )
                state[b'skipread'].add(node)
            except Exception as e:
                yield revlogproblem(
                    error=_(b'unpacking %s: %s')
                    % (short(node), stringutil.forcebytestr(e)),
                    node=node,
                )
                state[b'skipread'].add(node)

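    # Illustrative sketch (not part of revlog): consuming verifyintegrity().
    # ``rl`` and ``report`` are hypothetical; the ``state`` keys shown are
    # the ones the method above actually reads.
    #
    #     state = {b'expectedversion': 1, b'erroroncensored': True}
    #     for problem in rl.verifyintegrity(state):
    #         report(problem)  # revlogproblem with ``error`` or ``warning``
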
    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        d = {}

        if exclusivefiles:
            d[b'exclusivefiles'] = [(self.opener, self.indexfile)]
            if not self._inline:
                d[b'exclusivefiles'].append((self.opener, self.datafile))

        if sharedfiles:
            d[b'sharedfiles'] = []

        if revisionscount:
            d[b'revisionscount'] = len(self)

        if trackedsize:
            d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))

        if storedsize:
            d[b'storedsize'] = sum(
                self.opener.stat(path).st_size for path in self.files()
            )

        return d
@@ -1,589 +1,598
# nodemap.py - nodemap related code and utilities
#
# Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
# Copyright 2019 George Racinet <georges.racinet@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import re
import struct

from .. import (
    error,
    node as nodemod,
    util,
)


class NodeMap(dict):
    def __missing__(self, x):
        raise error.RevlogError(b'unknown node: %s' % x)


def persisted_data(revlog):
    """read the nodemap for a revlog from disk"""
    if revlog.nodemap_file is None:
        return None
    pdata = revlog.opener.tryread(revlog.nodemap_file)
    if not pdata:
        return None
    offset = 0
    (version,) = S_VERSION.unpack(pdata[offset : offset + S_VERSION.size])
    if version != ONDISK_VERSION:
        return None
    offset += S_VERSION.size
    headers = S_HEADER.unpack(pdata[offset : offset + S_HEADER.size])
    uid_size, tip_rev, data_length, data_unused, tip_node_size = headers
    offset += S_HEADER.size
    docket = NodeMapDocket(pdata[offset : offset + uid_size])
    offset += uid_size
    docket.tip_rev = tip_rev
    docket.tip_node = pdata[offset : offset + tip_node_size]
    docket.data_length = data_length
    docket.data_unused = data_unused

    filename = _rawdata_filepath(revlog, docket)
    use_mmap = revlog.opener.options.get("exp-persistent-nodemap.mmap")
    try:
        with revlog.opener(filename) as fd:
            if use_mmap:
                data = util.buffer(util.mmapread(fd, data_length))
            else:
                data = fd.read(data_length)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        # the data file is missing; without it the docket is useless
        return None
    if len(data) < data_length:
        return None
    return docket, data


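# Illustrative sketch, not part of nodemap.py: how a caller might consume
# persisted_data(); ``revlog`` is assumed to be a nodemap-enabled revlog.
def _example_load_persisted_nodemap(revlog):
    ondisk = persisted_data(revlog)
    if ondisk is None:
        return None  # no docket on disk, or it failed validation
    docket, data = ondisk
    # ``data`` holds ``docket.data_length`` bytes of nodemap trie data,
    # valid up to revision ``docket.tip_rev`` / node ``docket.tip_node``.
    return docket, data

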
def setup_persistent_nodemap(tr, revlog):
    """Install whatever is needed transaction side to persist a nodemap on disk

    (only actually persist the nodemap if this is relevant for this revlog)
    """
    if revlog._inline:
        return  # inline revlogs are too small for this to be relevant
    if revlog.nodemap_file is None:
        return  # we do not use persistent_nodemap on this revlog
    callback_id = b"revlog-persistent-nodemap-%s" % revlog.nodemap_file
    if tr.hasfinalize(callback_id):
        return  # no need to register again
    tr.addpending(
        callback_id, lambda tr: _persist_nodemap(tr, revlog, pending=True)
    )
    tr.addfinalize(callback_id, lambda tr: _persist_nodemap(tr, revlog))


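# The two registrations above are what give hooks access to an up-to-date
# nodemap: the ``addpending`` callback fires while the transaction is still
# open and writes the docket to a pending ``<nodemap_file>.a`` file (see
# _persist_nodemap below), while the ``addfinalize`` callback persists the
# canonical docket on successful close. A hedged sketch of the sequence,
# with ``tr`` and ``revlog`` assumed to exist:
#
#     setup_persistent_nodemap(tr, revlog)
#     # ...revisions are added; pre-close hooks can read the ``.a`` docket
#     # tr.close() -> the finalize callback writes the final docket in place

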
class _NoTransaction(object):
    """transaction-like object to update the nodemap outside a transaction
    """

    def __init__(self):
        self._postclose = {}

    def addpostclose(self, callback_id, callback_func):
        self._postclose[callback_id] = callback_func


def update_persistent_nodemap(revlog):
    """update the persistent nodemap right now

    To be used for updating the nodemap on disk outside of a normal
    transaction setup (e.g. `debugupdatecache`).
    """
    notr = _NoTransaction()
    _persist_nodemap(notr, revlog)
    for k in sorted(notr._postclose):
        notr._postclose[k](None)


def _persist_nodemap(tr, revlog, pending=False):
    """Write nodemap data on disk for a given revlog
    """
    if getattr(revlog, 'filteredrevs', ()):
        raise error.ProgrammingError(
            "cannot persist nodemap of a filtered changelog"
        )
    if revlog.nodemap_file is None:
        msg = "calling persist nodemap on a revlog without the feature enabled"
        raise error.ProgrammingError(msg)

    can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
    ondisk_docket = revlog._nodemap_docket
    feed_data = util.safehasattr(revlog.index, "update_nodemap_data")
    use_mmap = revlog.opener.options.get("exp-persistent-nodemap.mmap")

    data = None
    # first attempt an incremental update of the data
    if can_incremental and ondisk_docket is not None:
        target_docket = revlog._nodemap_docket.copy()
        (
            src_docket,
            data_changed_count,
            data,
        ) = revlog.index.nodemap_data_incremental()
        if src_docket != target_docket:
            data = None
        else:
            datafile = _rawdata_filepath(revlog, target_docket)
            # EXP-TODO: if this is a cache, this should use a cache vfs, not a
            # store vfs
            new_length = target_docket.data_length + len(data)
            with revlog.opener(datafile, b'r+') as fd:
                fd.seek(target_docket.data_length)
                fd.write(data)
                if feed_data:
                    if use_mmap:
                        fd.flush()
                        new_data = util.buffer(util.mmapread(fd, new_length))
                    else:
                        fd.seek(0)
                        new_data = fd.read(new_length)
            target_docket.data_length = new_length
            target_docket.data_unused += data_changed_count

    if data is None:
        # otherwise fall back to a full new export
        target_docket = NodeMapDocket()
        datafile = _rawdata_filepath(revlog, target_docket)
        if util.safehasattr(revlog.index, "nodemap_data_all"):
            data = revlog.index.nodemap_data_all()
        else:
            data = persistent_data(revlog.index)
        # EXP-TODO: if this is a cache, this should use a cache vfs, not a
        # store vfs
        with revlog.opener(datafile, b'w+') as fd:
            fd.write(data)
            if feed_data:
                if use_mmap:
                    fd.flush()
                    new_data = util.buffer(util.mmapread(fd, len(data)))
                else:
                    new_data = data
        target_docket.data_length = len(data)
    target_docket.tip_rev = revlog.tiprev()
    target_docket.tip_node = revlog.node(target_docket.tip_rev)
    # EXP-TODO: if this is a cache, this should use a cache vfs, not a
    # store vfs
    file_path = revlog.nodemap_file
    if pending:
        file_path += b'.a'
    with revlog.opener(file_path, b'w', atomictemp=True) as fp:
        fp.write(target_docket.serialize())
    revlog._nodemap_docket = target_docket
    if feed_data:
        revlog.index.update_nodemap_data(target_docket, new_data)

    # EXP-TODO: if the transaction aborts, we should remove the new data and
    # reinstall the old one.

    # search for old data files in all cases; some older process might have
    # left one behind.
    olds = _other_rawdata_filepath(revlog, target_docket)
    if olds:
        realvfs = getattr(revlog, '_realopener', revlog.opener)

        def cleanup(tr):
            for oldfile in olds:
                realvfs.tryunlink(oldfile)

        callback_id = b"revlog-cleanup-nodemap-%s" % revlog.nodemap_file
        tr.addpostclose(callback_id, cleanup)


### Nodemap docket file
#
# The nodemap data are stored on disk using 2 files:
#
# * a raw data file containing a persistent nodemap
#   (see `Nodemap Trie` section)
#
# * a small "docket" file containing metadata
#
# While the nodemap data can be multiple tens of megabytes, the "docket" is
# small, so it is easy to update automatically or to duplicate its content
# during a transaction.
#
# Multiple raw data files can exist at the same time (the currently valid
# one and a new one being used by an in-progress transaction). To
# accommodate this, the filename hosting the raw data has a variable part.
# The exact filename is specified inside the "docket" file.
#
# The docket file contains information to find, qualify and validate the raw
# data. Its content is currently very light, but it will expand as the on-disk
# nodemap gains the necessary features to be used in production.

# version 0 is experimental, no BC guarantee, do not use outside of tests.
ONDISK_VERSION = 0
S_VERSION = struct.Struct(">B")
S_HEADER = struct.Struct(">BQQQQ")

ID_SIZE = 8


225 def _make_uid():
231 def _make_uid():
226 """return a new unique identifier.
232 """return a new unique identifier.
227
233
228 The identifier is random and composed of ascii characters."""
234 The identifier is random and composed of ascii characters."""
229 return nodemod.hex(os.urandom(ID_SIZE))
235 return nodemod.hex(os.urandom(ID_SIZE))
230
236
231
237
232 class NodeMapDocket(object):
238 class NodeMapDocket(object):
233 """metadata associated with persistent nodemap data
239 """metadata associated with persistent nodemap data
234
240
235 The persistent data may come from disk or be on their way to disk.
241 The persistent data may come from disk or be on their way to disk.
236 """
242 """
237
243
238 def __init__(self, uid=None):
244 def __init__(self, uid=None):
239 if uid is None:
245 if uid is None:
240 uid = _make_uid()
246 uid = _make_uid()
241 # a unique identifier for the data file:
247 # a unique identifier for the data file:
242 # - When new data are appended, it is preserved.
248 # - When new data are appended, it is preserved.
243 # - When a new data file is created, a new identifier is generated.
249 # - When a new data file is created, a new identifier is generated.
244 self.uid = uid
250 self.uid = uid
245 # the tipmost revision stored in the data file. This revision and all
251 # the tipmost revision stored in the data file. This revision and all
246 # revision before it are expected to be encoded in the data file.
252 # revision before it are expected to be encoded in the data file.
247 self.tip_rev = None
253 self.tip_rev = None
248 # the node of that tipmost revision, if it mismatch the current index
254 # the node of that tipmost revision, if it mismatch the current index
249 # data the docket is not valid for the current index and should be
255 # data the docket is not valid for the current index and should be
250 # discarded.
256 # discarded.
251 #
257 #
252 # note: this method is not perfect as some destructive operation could
258 # note: this method is not perfect as some destructive operation could
253 # preserve the same tip_rev + tip_node while altering lower revision.
259 # preserve the same tip_rev + tip_node while altering lower revision.
254 # However this multiple other caches have the same vulnerability (eg:
260 # However this multiple other caches have the same vulnerability (eg:
255 # brancmap cache).
261 # brancmap cache).
256 self.tip_node = None
262 self.tip_node = None
257 # the size (in bytes) of the persisted data to encode the nodemap valid
263 # the size (in bytes) of the persisted data to encode the nodemap valid
258 # for `tip_rev`.
264 # for `tip_rev`.
259 # - data file shorter than this are corrupted,
265 # - data file shorter than this are corrupted,
260 # - any extra data should be ignored.
266 # - any extra data should be ignored.
261 self.data_length = None
267 self.data_length = None
262 # the amount (in bytes) of "dead" data, still in the data file but no
268 # the amount (in bytes) of "dead" data, still in the data file but no
263 # longer used for the nodemap.
269 # longer used for the nodemap.
264 self.data_unused = 0
270 self.data_unused = 0
265
271
266 def copy(self):
272 def copy(self):
267 new = NodeMapDocket(uid=self.uid)
273 new = NodeMapDocket(uid=self.uid)
268 new.tip_rev = self.tip_rev
274 new.tip_rev = self.tip_rev
269 new.tip_node = self.tip_node
275 new.tip_node = self.tip_node
270 new.data_length = self.data_length
276 new.data_length = self.data_length
271 new.data_unused = self.data_unused
277 new.data_unused = self.data_unused
272 return new
278 return new
273
279
274 def __cmp__(self, other):
280 def __cmp__(self, other):
275 if self.uid < other.uid:
281 if self.uid < other.uid:
276 return -1
282 return -1
277 if self.uid > other.uid:
283 if self.uid > other.uid:
278 return 1
284 return 1
279 elif self.data_length < other.data_length:
285 elif self.data_length < other.data_length:
280 return -1
286 return -1
281 elif self.data_length > other.data_length:
287 elif self.data_length > other.data_length:
282 return 1
288 return 1
283 return 0
289 return 0
284
290
285 def __eq__(self, other):
291 def __eq__(self, other):
286 return self.uid == other.uid and self.data_length == other.data_length
292 return self.uid == other.uid and self.data_length == other.data_length
287
293
288 def serialize(self):
294 def serialize(self):
289 """return serialized bytes for a docket using the passed uid"""
295 """return serialized bytes for a docket using the passed uid"""
290 data = []
296 data = []
291 data.append(S_VERSION.pack(ONDISK_VERSION))
297 data.append(S_VERSION.pack(ONDISK_VERSION))
292 headers = (
298 headers = (
293 len(self.uid),
299 len(self.uid),
294 self.tip_rev,
300 self.tip_rev,
295 self.data_length,
301 self.data_length,
296 self.data_unused,
302 self.data_unused,
297 len(self.tip_node),
303 len(self.tip_node),
298 )
304 )
299 data.append(S_HEADER.pack(*headers))
305 data.append(S_HEADER.pack(*headers))
300 data.append(self.uid)
306 data.append(self.uid)
301 data.append(self.tip_node)
307 data.append(self.tip_node)
302 return b''.join(data)
308 return b''.join(data)
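
# Editorial size check (not part of the module): with the structs above, a
# serialized docket is S_VERSION (1 byte) + S_HEADER (1 + 4 * 8 = 33 bytes)
# + the 16-character uid (hex of ID_SIZE = 8 random bytes) + the 20-byte
# binary tip node, i.e. 1 + 33 + 16 + 20 = 70 bytes -- which matches the
# `size=70` reported for `00changelog.n` in the tests below.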


def _rawdata_filepath(revlog, docket):
    """The (vfs relative) nodemap's rawdata file for a given uid"""
    if revlog.nodemap_file.endswith(b'.n.a'):
        prefix = revlog.nodemap_file[:-4]
    else:
        prefix = revlog.nodemap_file[:-2]
    return b"%s-%s.nd" % (prefix, docket.uid)
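# For illustration (the uid below is a made-up sample): with
# nodemap_file = b'00changelog.n' and a docket uid of b'52d9dd1a212ad90c',
# the rawdata file would be b'00changelog-52d9dd1a212ad90c.nd', i.e. one of
# the `00changelog-*.nd` files that appear in the tests below.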


def _other_rawdata_filepath(revlog, docket):
    prefix = revlog.nodemap_file[:-2]
    pattern = re.compile(br"(^|/)%s-[0-9a-f]+\.nd$" % prefix)
    new_file_path = _rawdata_filepath(revlog, docket)
    new_file_name = revlog.opener.basename(new_file_path)
    dirpath = revlog.opener.dirname(new_file_path)
    others = []
    for f in revlog.opener.listdir(dirpath):
        if pattern.match(f) and f != new_file_name:
            others.append(f)
    return others


### Nodemap Trie
#
# This is a simple reference implementation to compute and persist a nodemap
# trie. This reference implementation is write only. The python version of
# this is not expected to be actually used, since it won't provide any
# performance improvement over the existing non-persistent C implementation.
#
# The nodemap is persisted as a Trie using 4bits-address/16-entries blocks.
# Each revision can be addressed using its node's shortest unique prefix.
#
# The trie is stored as a sequence of blocks. Each block contains 16 entries
# (signed 32bit integers, big endian). Each entry can be one of the
# following:
#
# * value >= 0 -> index of a sub-block
# * value == -1 -> no value
# * value < -1 -> a revision value: rev = -(value+2)
#
# The implementation focuses on simplicity, not on performance. A Rust
# implementation should provide an efficient version of the same binary
# persistence. This reference python implementation is never meant to be
# extensively used in production.
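
# A small worked example of the entry encoding (editorial, not part of the
# module): within a block, the value 5 points at block number 5 in the data
# file, the value -1 marks an empty slot, and revision r is stored as
# -(r + 2), so revision 0 is stored as -2 and the value -7 decodes back to
# revision 5. On disk each block takes 16 * 4 = 64 bytes (see S_BLOCK below).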


def persistent_data(index):
    """return the persistent binary form for a nodemap for a given index
    """
    trie = _build_trie(index)
    return _persist_trie(trie)


def update_persistent_data(index, root, max_idx, last_rev):
    """return the incremental update for persistent nodemap from a given index
    """
    changed_block, trie = _update_trie(index, root, last_rev)
    return (
        changed_block * S_BLOCK.size,
        _persist_trie(trie, existing_idx=max_idx),
    )
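
# Editorial note: the first element of the returned pair is an upper bound on
# the bytes made obsolete by the update -- every block touched by an
# insertion is re-persisted at the end of the data, so the copies previously
# written for those blocks become "dead". This is what feeds the
# `data-unused` figures shown by `hg debugnodemap --metadata` in the tests
# below.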


S_BLOCK = struct.Struct(">" + ("l" * 16))

NO_ENTRY = -1
# rev 0 needs to be -2 because 0 is used by blocks and -1 is a special value.
REV_OFFSET = 2


def _transform_rev(rev):
    """Return the number used to represent the rev in the tree.

    (or retrieve a rev number from such a representation)

    Note that this is an involution, a function equal to its inverse (i.e.
    which gives the identity when applied to itself).
    """
    return -(rev + REV_OFFSET)
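
# Quick editorial illustration: _transform_rev(0) == -2 and
# _transform_rev(-2) == 0, which is why the same helper both encodes
# revisions for storage and decodes them when reading blocks back.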


def _to_int(hex_digit):
    """turn a hexadecimal digit into a proper integer"""
    return int(hex_digit, 16)


class Block(dict):
    """represent a block of the Trie

    contains up to 16 entries indexed from 0 to 15"""

    def __init__(self):
        super(Block, self).__init__()
        # If this block exists on disk, here is its ID
        self.ondisk_id = None

    def __iter__(self):
        return iter(self.get(i) for i in range(16))


def _build_trie(index):
    """build a nodemap trie

    The nodemap stores a revision number for each unique prefix.

    Each block is a dictionary with keys in `[0, 15]`. Values are either
    another block or a revision number.
    """
    root = Block()
    for rev in range(len(index)):
        hex = nodemod.hex(index[rev][7])
        _insert_into_block(index, 0, root, rev, hex)
    return root


def _update_trie(index, root, last_rev):
    """feed the trie with the revisions added to the index after last_rev"""
    changed = 0
    for rev in range(last_rev + 1, len(index)):
        hex = nodemod.hex(index[rev][7])
        changed += _insert_into_block(index, 0, root, rev, hex)
    return changed, root


def _insert_into_block(index, level, block, current_rev, current_hex):
    """insert a new revision in a block

    index: the index we are adding a revision for
    level: the depth of the current block in the trie
    block: the block currently being considered
    current_rev: the revision number we are adding
    current_hex: the hexadecimal representation of the node of that revision
    """
    changed = 1
    if block.ondisk_id is not None:
        block.ondisk_id = None
    hex_digit = _to_int(current_hex[level : level + 1])
    entry = block.get(hex_digit)
    if entry is None:
        # no entry, simply store the revision number
        block[hex_digit] = current_rev
    elif isinstance(entry, dict):
        # need to recurse to an underlying block
        changed += _insert_into_block(
            index, level + 1, entry, current_rev, current_hex
        )
    else:
        # collision with a previously unique prefix, inserting new
        # vertices to fit both entries.
        other_hex = nodemod.hex(index[entry][7])
        other_rev = entry
        new = Block()
        block[hex_digit] = new
        _insert_into_block(index, level + 1, new, other_rev, other_hex)
        _insert_into_block(index, level + 1, new, current_rev, current_hex)
    return changed


def _persist_trie(root, existing_idx=None):
    """turn a nodemap trie into persistent binary data

    See `_build_trie` for the nodemap trie structure"""
    block_map = {}
    if existing_idx is not None:
        base_idx = existing_idx + 1
    else:
        base_idx = 0
    chunks = []
    for tn in _walk_trie(root):
        if tn.ondisk_id is not None:
            block_map[id(tn)] = tn.ondisk_id
        else:
            block_map[id(tn)] = len(chunks) + base_idx
            chunks.append(_persist_block(tn, block_map))
    return b''.join(chunks)


def _walk_trie(block):
    """yield all the blocks in a trie

    Children blocks are always yielded before their parent block.
    """
    for (_, item) in sorted(block.items()):
        if isinstance(item, dict):
            for sub_block in _walk_trie(item):
                yield sub_block
    yield block


def _persist_block(block_node, block_map):
    """produce persistent binary data for a single block

    Children blocks are assumed to be already persisted and present in
    block_map.
    """
    data = tuple(_to_value(v, block_map) for v in block_node)
    return S_BLOCK.pack(*data)


def _to_value(item, block_map):
    """persist any value as an integer"""
    if item is None:
        return NO_ENTRY
    elif isinstance(item, dict):
        return block_map[id(item)]
    else:
        return _transform_rev(item)


def parse_data(data):
    """parse nodemap data into a nodemap Trie"""
    if (len(data) % S_BLOCK.size) != 0:
        msg = "nodemap data size is not a multiple of block size (%d): %d"
        raise error.Abort(msg % (S_BLOCK.size, len(data)))
    if not data:
        return Block(), None
    block_map = {}
    new_blocks = []
    for i in range(0, len(data), S_BLOCK.size):
        block = Block()
        block.ondisk_id = len(block_map)
        block_map[block.ondisk_id] = block
        block_data = data[i : i + S_BLOCK.size]
        values = S_BLOCK.unpack(block_data)
        new_blocks.append((block, values))
    for b, values in new_blocks:
        for idx, v in enumerate(values):
            if v == NO_ENTRY:
                continue
            elif v >= 0:
                b[idx] = block_map[v]
            else:
                b[idx] = _transform_rev(v)
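    # children are always persisted before their parent (see _walk_trie), so
    # the last block decoded is the root of the trie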
    return block, i // S_BLOCK.size


# debug utility


def check_data(ui, index, data):
    """verify that the provided nodemap data are valid for the given index"""
    ret = 0
    ui.status((b"revision in index: %d\n") % len(index))
    root, __ = parse_data(data)
    all_revs = set(_all_revisions(root))
    ui.status((b"revision in nodemap: %d\n") % len(all_revs))
    for r in range(len(index)):
        if r not in all_revs:
            msg = b" revision missing from nodemap: %d\n" % r
            ui.write_err(msg)
            ret = 1
        else:
            all_revs.remove(r)
        nm_rev = _find_node(root, nodemod.hex(index[r][7]))
        if nm_rev is None:
            msg = b" revision node does not match any entries: %d\n" % r
            ui.write_err(msg)
            ret = 1
        elif nm_rev != r:
            msg = (
                b" revision node does not match the expected revision: "
                b"%d != %d\n" % (r, nm_rev)
            )
            ui.write_err(msg)
            ret = 1

    if all_revs:
        for r in sorted(all_revs):
            msg = b" extra revision in nodemap: %d\n" % r
            ui.write_err(msg)
            ret = 1
    return ret
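
# Editorial note: `check_data` is what backs the `hg debugnodemap --check`
# invocations exercised in the test file below; the "revision in index" /
# "revision in nodemap" lines seen there come from the ui.status() calls
# above.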


def _all_revisions(root):
    """return all revisions stored in a Trie"""
    for block in _walk_trie(root):
        for v in block:
            if v is None or isinstance(v, Block):
                continue
            yield v


def _find_node(block, node):
    """find the revision associated with a given node"""
    entry = block.get(_to_int(node[0:1]))
    if isinstance(entry, dict):
        return _find_node(entry, node[1:])
    return entry
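
# The sketch below is editorial (not part of the module) and shows how the
# pieces above fit together: build a trie from a toy index, persist it,
# parse it back, and resolve the nodes again. It assumes a minimal fake
# index in which entry 7 of each tuple is the 20-byte binary node, mirroring
# how the real revlog index is accessed in the functions above.
#
#     fake_nodes = [bytes([i]) * 20 for i in range(3)]
#     fake_index = [(None,) * 7 + (n,) for n in fake_nodes]
#     data = persistent_data(fake_index)
#     assert len(data) % S_BLOCK.size == 0
#     root, last_block_idx = parse_data(data)
#     for rev, n in enumerate(fake_nodes):
#         assert _find_node(root, nodemod.hex(n)) == rev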
@@ -1,283 +1,319
===================================
Test the persistent on-disk nodemap
===================================

  $ hg init test-repo
  $ cd test-repo
  $ cat << EOF >> .hg/hgrc
  > [experimental]
  > exp-persistent-nodemap=yes
  > [devel]
  > persistent-nodemap=yes
  > EOF
  $ hg debugbuilddag .+5000
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5000
  tip-node: 06ddac466af534d365326c13c3879f97caca3cb1
  data-length: 122880
  data-unused: 0
  $ f --size .hg/store/00changelog.n
  .hg/store/00changelog.n: size=70
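
(Illustrative arithmetic, not part of the original test: the 70 bytes are the
docket described in nodemap.py -- one version byte, a 33-byte header, the
16-character uid and the 20-byte tip node -- while the 122880-byte data file
is 122880 / 64 = 1920 trie blocks of 16 four-byte entries each.)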

Simple lookup works

  $ ANYNODE=`hg log --template '{node|short}\n' --rev tip`
  $ hg log -r "$ANYNODE" --template '{rev}\n'
  5000


#if rust

  $ f --sha256 .hg/store/00changelog-*.nd
  .hg/store/00changelog-????????????????.nd: sha256=1e38e9ffaa45cad13f15c1a9880ad606f4241e8beea2f61b4d5365abadfb55f6 (glob)
  $ hg debugnodemap --dump-new | f --sha256 --size
  size=122880, sha256=1e38e9ffaa45cad13f15c1a9880ad606f4241e8beea2f61b4d5365abadfb55f6
  $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
  size=122880, sha256=1e38e9ffaa45cad13f15c1a9880ad606f4241e8beea2f61b4d5365abadfb55f6
  0000: 00 00 00 76 00 00 01 65 00 00 00 95 00 00 01 34 |...v...e.......4|
  0010: 00 00 00 19 00 00 01 69 00 00 00 ab 00 00 00 4b |.......i.......K|
  0020: 00 00 00 07 00 00 01 4c 00 00 00 f8 00 00 00 8f |.......L........|
  0030: 00 00 00 c0 00 00 00 a7 00 00 00 89 00 00 01 46 |...............F|
  0040: 00 00 00 92 00 00 01 bc 00 00 00 71 00 00 00 ac |...........q....|
  0050: 00 00 00 af 00 00 00 b4 00 00 00 34 00 00 01 ca |...........4....|
  0060: 00 00 00 23 00 00 01 45 00 00 00 2d 00 00 00 b2 |...#...E...-....|
  0070: 00 00 00 56 00 00 01 0f 00 00 00 4e 00 00 02 4c |...V.......N...L|
  0080: 00 00 00 e7 00 00 00 cd 00 00 01 5b 00 00 00 78 |...........[...x|
  0090: 00 00 00 e3 00 00 01 8e 00 00 00 4f 00 00 00 b1 |...........O....|
  00a0: 00 00 00 30 00 00 00 11 00 00 00 25 00 00 00 d2 |...0.......%....|
  00b0: 00 00 00 ec 00 00 00 69 00 00 01 2b 00 00 01 2e |.......i...+....|
  00c0: 00 00 00 aa 00 00 00 15 00 00 00 3a 00 00 01 4e |...........:...N|
  00d0: 00 00 00 4d 00 00 00 9d 00 00 00 8e 00 00 00 a4 |...M............|
  00e0: 00 00 00 c3 00 00 00 eb 00 00 00 29 00 00 00 ad |...........)....|
  00f0: 00 00 01 3a 00 00 01 32 00 00 00 04 00 00 00 53 |...:...2.......S|


#else

  $ f --sha256 .hg/store/00changelog-*.nd
  .hg/store/00changelog-????????????????.nd: sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7 (glob)
  $ hg debugnodemap --dump-new | f --sha256 --size
  size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
  $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
  size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
  0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  0010: ff ff ff ff ff ff ff ff ff ff fa c2 ff ff ff ff |................|
  0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  0030: ff ff ff ff ff ff ed b3 ff ff ff ff ff ff ff ff |................|
  0040: ff ff ff ff ff ff ee 34 00 00 00 00 ff ff ff ff |.......4........|
  0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  0060: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  0080: ff ff ff ff ff ff f8 50 ff ff ff ff ff ff ff ff |.......P........|
  0090: ff ff ff ff ff ff ff ff ff ff ec c7 ff ff ff ff |................|
  00a0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  00b0: ff ff ff ff ff ff fa be ff ff f2 fc ff ff ff ff |................|
  00c0: ff ff ff ff ff ff ef ea ff ff ff ff ff ff f9 17 |................|
  00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
  00f0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|

#endif

  $ hg debugnodemap --check
  revision in index: 5001
  revision in nodemap: 5001

add a new commit

  $ hg up
  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ echo foo > foo
  $ hg add foo
  $ hg ci -m 'foo'

#if no-pure no-rust
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5001
  tip-node: 2dd9b5258caa46469ff07d4a3da1eb3529a51f49
  data-length: 122880
  data-unused: 0
#else
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5001
  tip-node: 2dd9b5258caa46469ff07d4a3da1eb3529a51f49
  data-length: 123072
  data-unused: 192
#endif

  $ f --size .hg/store/00changelog.n
  .hg/store/00changelog.n: size=70

(The pure code uses the debug code that performs an incremental update; the C code re-encodes from scratch)

#if pure
  $ f --sha256 .hg/store/00changelog-*.nd --size
  .hg/store/00changelog-????????????????.nd: size=123072, sha256=136472751566c8198ff09e306a7d2f9bd18bd32298d614752b73da4d6df23340 (glob)
#endif

#if rust
  $ f --sha256 .hg/store/00changelog-*.nd --size
  .hg/store/00changelog-????????????????.nd: size=123072, sha256=ccc8a43310ace13812fcc648683e259346754ef934c12dd238cf9b7fadfe9a4b (glob)
#endif

#if no-pure no-rust
  $ f --sha256 .hg/store/00changelog-*.nd --size
  .hg/store/00changelog-????????????????.nd: size=122880, sha256=bfafebd751c4f6d116a76a37a1dee2a251747affe7efbcc4f4842ccc746d4db9 (glob)
#endif

  $ hg debugnodemap --check
  revision in index: 5002
  revision in nodemap: 5002

Test code path without mmap
---------------------------

  $ echo bar > bar
  $ hg add bar
  $ hg ci -m 'bar' --config experimental.exp-persistent-nodemap.mmap=no

  $ hg debugnodemap --check --config experimental.exp-persistent-nodemap.mmap=yes
  revision in index: 5003
  revision in nodemap: 5003
  $ hg debugnodemap --check --config experimental.exp-persistent-nodemap.mmap=no
  revision in index: 5003
  revision in nodemap: 5003


#if pure
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5002
  tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
  data-length: 123328
  data-unused: 384
  $ f --sha256 .hg/store/00changelog-*.nd --size
  .hg/store/00changelog-????????????????.nd: size=123328, sha256=10d26e9776b6596af0f89143a54eba8cc581e929c38242a02a7b0760698c6c70 (glob)
#endif
#if rust
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5002
  tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
  data-length: 123328
  data-unused: 384
  $ f --sha256 .hg/store/00changelog-*.nd --size
  .hg/store/00changelog-????????????????.nd: size=123328, sha256=081eec9eb6708f2bf085d939b4c97bc0b6762bc8336bc4b93838f7fffa1516bf (glob)
#endif
#if no-pure no-rust
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5002
  tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
  data-length: 122944
  data-unused: 0
  $ f --sha256 .hg/store/00changelog-*.nd --size
  .hg/store/00changelog-????????????????.nd: size=122944, sha256=755976b22b64ab680401b45395953504e64e7fa8c31ac570f58dee21e15f9bc0 (glob)
#endif

Test force warming the cache

  $ rm .hg/store/00changelog.n
  $ hg debugnodemap --metadata
  $ hg debugupdatecache
#if pure
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5002
  tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
  data-length: 122944
  data-unused: 0
#else
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5002
  tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
  data-length: 122944
  data-unused: 0
#endif

Check out of sync nodemap
=========================

First copy the old data to the side.

  $ mkdir ../tmp-copies
  $ cp .hg/store/00changelog-????????????????.nd .hg/store/00changelog.n ../tmp-copies

Nodemap lagging behind
----------------------

make a new commit

  $ echo bar2 > bar
  $ hg ci -m 'bar2'
  $ NODE=`hg log -r tip -T '{node}\n'`
  $ hg log -r "$NODE" -T '{rev}\n'
  5003

If the nodemap is lagging behind, it can catch up fine

  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5003
  tip-node: 5c049e9c4a4af159bdcd65dce1b6bf303a0da6cf
  data-length: 123200 (pure !)
  data-length: 123200 (rust !)
  data-length: 122944 (no-rust no-pure !)
  data-unused: 256 (pure !)
  data-unused: 256 (rust !)
  data-unused: 0 (no-rust no-pure !)
  $ cp -f ../tmp-copies/* .hg/store/
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5002
  tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
  data-length: 122944
  data-unused: 0
  $ hg log -r "$NODE" -T '{rev}\n'
  5003

changelog altered
-----------------

If the nodemap is not gated behind a requirement, an unaware client can alter
the repository so that the revlog used to generate the nodemap is no longer
compatible with the persistent nodemap. We need to detect that.

  $ hg up "$NODE~5"
  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
  $ echo bar > babar
  $ hg add babar
  $ hg ci -m 'babar'
  created new head
  $ OTHERNODE=`hg log -r tip -T '{node}\n'`
  $ hg log -r "$OTHERNODE" -T '{rev}\n'
  5004

  $ hg --config extensions.strip= strip --rev "$NODE~1" --no-backup

The nodemap should detect that the changelog has been tampered with and recover.

  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5002
  tip-node: 42bf3068c7ddfdfded53c4eb11d02266faeebfee
  data-length: 123456 (pure !)
  data-length: 246464 (rust !)
  data-length: 123008 (no-pure no-rust !)
  data-unused: 448 (pure !)
  data-unused: 123904 (rust !)
  data-unused: 0 (no-pure no-rust !)

  $ cp -f ../tmp-copies/* .hg/store/
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5002
  tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
  data-length: 122944
  data-unused: 0
  $ hg log -r "$OTHERNODE" -T '{rev}\n'
  5002

Check transaction-related property
==================================

An up-to-date nodemap should be available to shell hooks.

  $ echo dsljfl > a
  $ hg add a
  $ hg ci -m a
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5003
  tip-node: c91af76d172f1053cca41b83f7c2e4e514fe2bcf
  data-length: 123008
  data-unused: 0
  $ echo babar2 > babar
  $ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
  uid: ???????????????? (glob)
  tip-rev: 5004
  tip-node: ba87cd9559559e4b91b28cb140d003985315e031
  data-length: 123328 (pure !)
  data-length: 123328 (rust !)
  data-length: 123136 (no-pure no-rust !)
  data-unused: 192 (pure !)
  data-unused: 192 (rust !)
  data-unused: 0 (no-pure no-rust !)
  $ hg debugnodemap --metadata
  uid: ???????????????? (glob)
  tip-rev: 5004
  tip-node: ba87cd9559559e4b91b28cb140d003985315e031
  data-length: 123328 (pure !)
  data-length: 123328 (rust !)
  data-length: 123136 (no-pure no-rust !)
  data-unused: 192 (pure !)
  data-unused: 192 (rust !)
  data-unused: 0 (no-pure no-rust !)