revlogv2: track current index size in the docket...
Author: marmoute
Changeset: r48012:6597255a (branch: default)
configitems.py
@@ -1,2698 +1,2699 @@
# configitems.py - centralized declaration of configuration options
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools
import re

from . import (
    encoding,
    error,
)


def loadconfigtable(ui, extname, configtable):
    """update config items known to the ui with the extension ones"""
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            msg = b"extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config=b'warn-config')

        knownitems.update(items)

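# Illustration (a minimal sketch, not part of this change): an extension
# typically builds its `configtable` through the registrar indirection and
# relies on loadconfigtable() above to merge it into the ui; the extension
# name b'myext' and its item below are hypothetical:
#
#     from mercurial import registrar
#
#     configtable = {}
#     configitem = registrar.configitem(configtable)
#
#     configitem(b'myext', b'some-flag', default=False)
#
# After loading, ui.configbool(b'myext', b'some-flag') is validated against
# and defaulted from this declaration.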

class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition; the name is matched as a regular
              expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        self._re = None
        if generic:
            self._re = re.compile(self.name)


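# Illustration (hypothetical declarations, registered nowhere): the less
# common configitem fields combine like this, assuming `configitem` was
# obtained from getitemregister() below:
#
#     configitem(
#         b'myext',
#         b'new-name',
#         default=b'auto',
#         alias=[(b'myext', b'old-name')],  # older (section, name) spellings
#     )
#     configitem(
#         b'myext',
#         br'color\..*',  # with generic=True the name is a regular expression
#         default=None,
#         generic=True,
#         priority=10,  # used to order generics during lookup
#     )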
class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        baseitem = super(itemregister, self).get(key)
        if baseitem is not None and not baseitem.generic:
            return baseitem

        # search for a matching generic item
        generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
        for item in generics:
            # we use 'match' instead of 'search' to make the matching simpler
            # for people unfamiliar with regular expressions. Having the match
            # rooted at the start of the string produces less surprising
            # results for users writing simple regexes for sub-attributes.
            #
            # For example, using "color\..*" with match produces an
            # unsurprising result, while using search could suddenly match
            # apparently unrelated configuration that happens to contain
            # "color." anywhere. This is a tradeoff where we favor requiring
            # ".*" on some patterns to avoid the need to prefix most patterns
            # with "^". The "^" seems more error prone.
            if item._re.match(key):
                return item

        return None


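# Lookup sketch (throwaway items for illustration): exact, non-generic
# entries win outright; otherwise the sorted generics are tried with
# re.match():
#
#     reg = itemregister()
#     reg[b'mode'] = configitem(b'color', b'mode', default=b'auto')
#     reg[b'.*'] = configitem(b'color', b'.*', generic=True)
#
#     reg.get(b'mode')           # -> the exact b'color.mode' item
#     reg.get(b'diff.inserted')  # -> falls through to the generic b'.*' item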
coreitems = {}


def _register(configtable, *args, **kwargs):
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = b"duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item


# special value for cases where the default is derived from other values
dynamicdefault = object()
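# Sketch of what dynamicdefault implies for readers (the exact handling
# lives in ui.py): an item declared with default=dynamicdefault has no
# usable static value, so callers are expected to pass a default at the
# call site, e.g. with a hypothetical name:
#
#     ui.config(b'command-templates', b'oneline-summary.mine', some_default)
#
# ui code can recognize such items by comparing item.default with
# dynamicdefault using an 'is' identity check.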

# Registering actual config items


def getitemregister(configtable):
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f


coreconfigitem = getitemregister(coreitems)


def _registerdiffopts(section, configprefix=b''):
    coreconfigitem(
        section,
        configprefix + b'nodates',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'showfunc',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'unified',
        default=None,
    )
    coreconfigitem(
        section,
        configprefix + b'git',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorews',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorewsamount',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignoreblanklines',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorewseol',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'nobinary',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'noprefix',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'word-diff',
        default=False,
    )


coreconfigitem(
    b'alias',
    b'.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'auth',
    b'cookiefile',
    default=None,
)
_registerdiffopts(section=b'annotate')
# bookmarks.pushing: internal hack for discovery
coreconfigitem(
    b'bookmarks',
    b'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem(
    b'bundle',
    b'mainreporoot',
    default=b'',
)
coreconfigitem(
    b'censor',
    b'policy',
    default=b'abort',
    experimental=True,
)
coreconfigitem(
    b'chgserver',
    b'idletimeout',
    default=3600,
)
coreconfigitem(
    b'chgserver',
    b'skiphash',
    default=False,
)
coreconfigitem(
    b'cmdserver',
    b'log',
    default=None,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-files',
    default=7,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-size',
    default=b'1 MB',
)
coreconfigitem(
    b'cmdserver',
    b'max-repo-cache',
    default=0,
    experimental=True,
)
coreconfigitem(
    b'cmdserver',
    b'message-encodings',
    default=list,
)
coreconfigitem(
    b'cmdserver',
    b'track-log',
    default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
)
coreconfigitem(
    b'cmdserver',
    b'shutdown-on-interrupt',
    default=True,
)
coreconfigitem(
    b'color',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'color',
    b'mode',
    default=b'auto',
)
coreconfigitem(
    b'color',
    b'pagermode',
    default=dynamicdefault,
)
coreconfigitem(
    b'command-templates',
    b'graphnode',
    default=None,
    alias=[(b'ui', b'graphnodetemplate')],
)
coreconfigitem(
    b'command-templates',
    b'log',
    default=None,
    alias=[(b'ui', b'logtemplate')],
)
coreconfigitem(
    b'command-templates',
    b'mergemarker',
    default=(
        b'{node|short} '
        b'{ifeq(tags, "tip", "", '
        b'ifeq(tags, "", "", "{tags} "))}'
        b'{if(bookmarks, "{bookmarks} ")}'
        b'{ifeq(branch, "default", "", "{branch} ")}'
        b'- {author|user}: {desc|firstline}'
    ),
    alias=[(b'ui', b'mergemarkertemplate')],
)
coreconfigitem(
    b'command-templates',
    b'pre-merge-tool-output',
    default=None,
    alias=[(b'ui', b'pre-merge-tool-output-template')],
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary',
    default=None,
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary.*',
    default=dynamicdefault,
    generic=True,
)
_registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
coreconfigitem(
    b'commands',
    b'commit.post-status',
    default=False,
)
coreconfigitem(
    b'commands',
    b'grep.all-files',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'merge.require-rev',
    default=False,
)
coreconfigitem(
    b'commands',
    b'push.require-revs',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.confirm',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.explicit-re-merge',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.mark-check',
    default=b'none',
)
_registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
coreconfigitem(
    b'commands',
    b'show.aliasprefix',
    default=list,
)
coreconfigitem(
    b'commands',
    b'status.relative',
    default=False,
)
coreconfigitem(
    b'commands',
    b'status.skipstates',
    default=[],
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'status.terse',
    default=b'',
)
coreconfigitem(
    b'commands',
    b'status.verbose',
    default=False,
)
coreconfigitem(
    b'commands',
    b'update.check',
    default=None,
)
coreconfigitem(
    b'commands',
    b'update.requiredest',
    default=False,
)
coreconfigitem(
    b'committemplate',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'convert',
    b'bzr.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.cache',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.fuzz',
    default=60,
)
coreconfigitem(
    b'convert',
    b'cvsps.logencoding',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergefrom',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergeto',
    default=None,
)
coreconfigitem(
    b'convert',
    b'git.committeractions',
    default=lambda: [b'messagedifferent'],
)
coreconfigitem(
    b'convert',
    b'git.extrakeys',
    default=list,
)
coreconfigitem(
    b'convert',
    b'git.findcopiesharder',
    default=False,
)
coreconfigitem(
    b'convert',
    b'git.remoteprefix',
    default=b'remote',
)
coreconfigitem(
    b'convert',
    b'git.renamelimit',
    default=400,
)
coreconfigitem(
    b'convert',
    b'git.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'git.similarity',
    default=50,
)
coreconfigitem(
    b'convert',
    b'git.skipsubmodules',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.clonebranches',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.ignoreerrors',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.preserve-hash',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.revs',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.saverev',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.sourcename',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.startrev',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.tagsbranch',
    default=b'default',
)
coreconfigitem(
    b'convert',
    b'hg.usebranchnames',
    default=True,
)
coreconfigitem(
    b'convert',
    b'ignoreancestorcheck',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'convert',
    b'localtimezone',
    default=False,
)
coreconfigitem(
    b'convert',
    b'p4.encoding',
    default=dynamicdefault,
)
coreconfigitem(
    b'convert',
    b'p4.startrev',
    default=0,
)
coreconfigitem(
    b'convert',
    b'skiptags',
    default=False,
)
coreconfigitem(
    b'convert',
    b'svn.debugsvnlog',
    default=True,
)
coreconfigitem(
    b'convert',
    b'svn.trunk',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.tags',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.branches',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.startrev',
    default=0,
)
coreconfigitem(
    b'convert',
    b'svn.dangerous-set-commit-dates',
    default=False,
)
coreconfigitem(
    b'debug',
    b'dirstate.delaywrite',
    default=0,
)
coreconfigitem(
    b'debug',
    b'revlog.verifyposition.changelog',
    default=b'',
)
coreconfigitem(
    b'defaults',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'devel',
    b'all-warnings',
    default=False,
)
coreconfigitem(
    b'devel',
    b'bundle2.debug',
    default=False,
)
coreconfigitem(
    b'devel',
    b'bundle.delta',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'cache-vfs',
    default=None,
)
coreconfigitem(
    b'devel',
    b'check-locks',
    default=False,
)
coreconfigitem(
    b'devel',
    b'check-relroot',
    default=False,
)
# Track copy information for all files, not just "added" ones (very slow)
coreconfigitem(
    b'devel',
    b'copy-tracing.trace-all-files',
    default=False,
)
coreconfigitem(
    b'devel',
    b'default-date',
    default=None,
)
coreconfigitem(
    b'devel',
    b'deprec-warn',
    default=False,
)
coreconfigitem(
    b'devel',
    b'disableloaddefaultcerts',
    default=False,
)
coreconfigitem(
    b'devel',
    b'warn-empty-changegroup',
    default=False,
)
coreconfigitem(
    b'devel',
    b'legacy.exchange',
    default=list,
)
# When True, revlogs use a special reference version of the nodemap that is
# not performant but is "known" to behave properly.
coreconfigitem(
    b'devel',
    b'persistent-nodemap',
    default=False,
)
coreconfigitem(
    b'devel',
    b'servercafile',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'serverexactprotocol',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'serverrequirecert',
    default=False,
)
coreconfigitem(
    b'devel',
    b'strip-obsmarkers',
    default=True,
)
coreconfigitem(
    b'devel',
    b'warn-config',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-default',
    default=None,
)
coreconfigitem(
    b'devel',
    b'user.obsmarker',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-unknown',
    default=None,
)
coreconfigitem(
    b'devel',
    b'debug.copies',
    default=False,
)
coreconfigitem(
    b'devel',
    b'copy-tracing.multi-thread',
    default=True,
)
coreconfigitem(
    b'devel',
    b'debug.extensions',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.repo-filters',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.peer-request',
    default=False,
)
# If discovery.exchange-heads is False, the discovery will not start with
# remote head fetching and local head querying.
coreconfigitem(
    b'devel',
    b'discovery.exchange-heads',
    default=True,
)
# If discovery.grow-sample is False, the sample size used in set discovery
# will not be increased through the process.
coreconfigitem(
    b'devel',
    b'discovery.grow-sample',
    default=True,
)
# When discovery.grow-sample.dynamic is True (the default), the sample size
# is adapted to the shape of the undecided set. It is set to the max of
# <target-size>, len(roots(undecided)), and len(heads(undecided)).
coreconfigitem(
    b'devel',
    b'discovery.grow-sample.dynamic',
    default=True,
)
# discovery.grow-sample.rate controls the rate at which the sample grows.
coreconfigitem(
    b'devel',
    b'discovery.grow-sample.rate',
    default=1.05,
)
# If discovery.randomize is False, random sampling during discovery is
# deterministic. It is meant for integration tests.
coreconfigitem(
    b'devel',
    b'discovery.randomize',
    default=True,
)
# Control the initial size of the discovery sample
coreconfigitem(
    b'devel',
    b'discovery.sample-size',
    default=200,
)
# Control the size of the discovery sample used for the initial round
coreconfigitem(
    b'devel',
    b'discovery.sample-size.initial',
    default=100,
)
_registerdiffopts(section=b'diff')
coreconfigitem(
    b'diff',
    b'merge',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'email',
    b'bcc',
    default=None,
)
coreconfigitem(
    b'email',
    b'cc',
    default=None,
)
coreconfigitem(
    b'email',
    b'charsets',
    default=list,
)
coreconfigitem(
    b'email',
    b'from',
    default=None,
)
coreconfigitem(
    b'email',
    b'method',
    default=b'smtp',
)
coreconfigitem(
    b'email',
    b'reply-to',
    default=None,
)
coreconfigitem(
    b'email',
    b'to',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'archivemetatemplate',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'auto-publish',
    default=b'publish',
)
coreconfigitem(
    b'experimental',
    b'bundle-phases',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2-advertise',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'bundle2-output-capture',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2.pushback',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2lazylocking',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.bzip2',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.gzip',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.none',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.zstd',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.bzip2',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.gzip',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.none',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.zstd',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'changegroup3',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'changegroup4',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'cleanup-as-archived',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'clientcompressionengines',
    default=list,
)
coreconfigitem(
    b'experimental',
    b'copytrace',
    default=b'on',
)
coreconfigitem(
    b'experimental',
    b'copytrace.movecandidateslimit',
    default=100,
)
coreconfigitem(
    b'experimental',
    b'copytrace.sourcecommitlimit',
    default=100,
)
coreconfigitem(
    b'experimental',
    b'copies.read-from',
    default=b"filelog-only",
)
coreconfigitem(
    b'experimental',
    b'copies.write-to',
    default=b'filelog-only',
)
coreconfigitem(
    b'experimental',
    b'crecordtest',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'directaccess',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'directaccess.revnums',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'dirstate-tree.in-memory',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'editortmpinhg',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'evolution',
    default=list,
)
coreconfigitem(
    b'experimental',
    b'evolution.allowdivergence',
    default=False,
    alias=[(b'experimental', b'allowdivergence')],
)
coreconfigitem(
    b'experimental',
    b'evolution.allowunstable',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.createmarkers',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.effect-flags',
    default=True,
    alias=[(b'experimental', b'effect-flags')],
)
coreconfigitem(
    b'experimental',
    b'evolution.exchange',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.bundle-obsmarker',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'evolution.bundle-obsmarker:mandatory',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'log.topo',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'evolution.report-instabilities',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'evolution.track-operation',
    default=True,
)
# repo-level config to exclude a revset from visibility
#
# The target use case is to use `share` to expose different subsets of the
# same repository, especially server side. See also `server.view`.
coreconfigitem(
    b'experimental',
    b'extra-filter-revs',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'maxdeltachainspan',
    default=-1,
)
# tracks files which were undeleted (merge might delete them but we explicitly
# kept/undeleted them) and creates new filenodes for them
coreconfigitem(
    b'experimental',
    b'merge-track-salvaged',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'mergetempdirprefix',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'mmapindexthreshold',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'narrow',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'nonnormalparanoidcheck',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'exportableenviron',
    default=list,
)
coreconfigitem(
    b'experimental',
    b'extendedheader.index',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'extendedheader.similarity',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'graphshorten',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'graphstyle.parent',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'graphstyle.missing',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'graphstyle.grandparent',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'hook-track-tags',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'httppeer.advertise-v2',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'httppeer.v2-encoder-order',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'httppostargs',
    default=False,
)
coreconfigitem(b'experimental', b'nointerrupt', default=False)
coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)

coreconfigitem(
    b'experimental',
    b'obsmarkers-exchange-debug',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'remotenames',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'removeemptydirs',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'revert.interactive.select-to-keep',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'revisions.prefixhexnode',
    default=False,
)
# "out of experimental" todo list.
#
# * properly hide uncommitted content from other processes
# * expose transaction content hooks during pre-commit validation
# * include management of a persistent nodemap in the main docket
# * enforce a "no-truncate" policy for mmap safety
#   - for censoring operation
#   - for stripping operation
#   - for rollback operation
# * proper streaming (race free) of the docket file
# * store the data size in the docket to simplify sidedata rewrite.
# * track garbage data to eventually allow rewriting -existing- sidedata.
# * Exchange-wise, we will also need to do something more efficient than
#   keeping references to the affected revlogs, especially memory-wise when
#   rewriting sidedata.
# * sidedata compression
# * introduce a proper solution to reduce the number of filelog related files.
# * Improvements to consider
#   - track compression mode in the index entries instead of the chunks
#   - split the data offset and flag field (the 2 bytes saved are mostly
#     trouble)
#   - keep track of uncompressed -chunk- size (to preallocate memory better)
#   - keep track of chain base or size (probably not that useful anymore)
#   - store data and sidedata in different files
1173 coreconfigitem(
1174 coreconfigitem(
1174 b'experimental',
1175 b'experimental',
1175 b'revlogv2',
1176 b'revlogv2',
1176 default=None,
1177 default=None,
1177 )
1178 )
1178 coreconfigitem(
1179 coreconfigitem(
1179 b'experimental',
1180 b'experimental',
1180 b'revisions.disambiguatewithin',
1181 b'revisions.disambiguatewithin',
1181 default=None,
1182 default=None,
1182 )
1183 )
1183 coreconfigitem(
1184 coreconfigitem(
1184 b'experimental',
1185 b'experimental',
1185 b'rust.index',
1186 b'rust.index',
1186 default=False,
1187 default=False,
1187 )
1188 )
1188 coreconfigitem(
1189 coreconfigitem(
1189 b'experimental',
1190 b'experimental',
1190 b'server.filesdata.recommended-batch-size',
1191 b'server.filesdata.recommended-batch-size',
1191 default=50000,
1192 default=50000,
1192 )
1193 )
1193 coreconfigitem(
1194 coreconfigitem(
1194 b'experimental',
1195 b'experimental',
1195 b'server.manifestdata.recommended-batch-size',
1196 b'server.manifestdata.recommended-batch-size',
1196 default=100000,
1197 default=100000,
1197 )
1198 )
1198 coreconfigitem(
1199 coreconfigitem(
1199 b'experimental',
1200 b'experimental',
1200 b'server.stream-narrow-clones',
1201 b'server.stream-narrow-clones',
1201 default=False,
1202 default=False,
1202 )
1203 )
1203 coreconfigitem(
1204 coreconfigitem(
1204 b'experimental',
1205 b'experimental',
1205 b'single-head-per-branch',
1206 b'single-head-per-branch',
1206 default=False,
1207 default=False,
1207 )
1208 )
1208 coreconfigitem(
1209 coreconfigitem(
1209 b'experimental',
1210 b'experimental',
1210 b'single-head-per-branch:account-closed-heads',
1211 b'single-head-per-branch:account-closed-heads',
1211 default=False,
1212 default=False,
1212 )
1213 )
1213 coreconfigitem(
1214 coreconfigitem(
1214 b'experimental',
1215 b'experimental',
1215 b'single-head-per-branch:public-changes-only',
1216 b'single-head-per-branch:public-changes-only',
1216 default=False,
1217 default=False,
1217 )
1218 )
1218 coreconfigitem(
1219 coreconfigitem(
1219 b'experimental',
1220 b'experimental',
1220 b'sshserver.support-v2',
1221 b'sshserver.support-v2',
1221 default=False,
1222 default=False,
1222 )
1223 )
1223 coreconfigitem(
1224 coreconfigitem(
1224 b'experimental',
1225 b'experimental',
1225 b'sparse-read',
1226 b'sparse-read',
1226 default=False,
1227 default=False,
1227 )
1228 )
1228 coreconfigitem(
1229 coreconfigitem(
1229 b'experimental',
1230 b'experimental',
1230 b'sparse-read.density-threshold',
1231 b'sparse-read.density-threshold',
1231 default=0.50,
1232 default=0.50,
1232 )
1233 )
1233 coreconfigitem(
1234 coreconfigitem(
1234 b'experimental',
1235 b'experimental',
1235 b'sparse-read.min-gap-size',
1236 b'sparse-read.min-gap-size',
1236 default=b'65K',
1237 default=b'65K',
1237 )
1238 )
1238 coreconfigitem(
1239 coreconfigitem(
1239 b'experimental',
1240 b'experimental',
1240 b'treemanifest',
1241 b'treemanifest',
1241 default=False,
1242 default=False,
1242 )
1243 )
1243 coreconfigitem(
1244 coreconfigitem(
1244 b'experimental',
1245 b'experimental',
1245 b'update.atomic-file',
1246 b'update.atomic-file',
1246 default=False,
1247 default=False,
1247 )
1248 )
1248 coreconfigitem(
1249 coreconfigitem(
1249 b'experimental',
1250 b'experimental',
1250 b'sshpeer.advertise-v2',
1251 b'sshpeer.advertise-v2',
1251 default=False,
1252 default=False,
1252 )
1253 )
1253 coreconfigitem(
1254 coreconfigitem(
1254 b'experimental',
1255 b'experimental',
1255 b'web.apiserver',
1256 b'web.apiserver',
1256 default=False,
1257 default=False,
1257 )
1258 )
1258 coreconfigitem(
1259 coreconfigitem(
1259 b'experimental',
1260 b'experimental',
1260 b'web.api.http-v2',
1261 b'web.api.http-v2',
1261 default=False,
1262 default=False,
1262 )
1263 )
1263 coreconfigitem(
1264 coreconfigitem(
1264 b'experimental',
1265 b'experimental',
1265 b'web.api.debugreflect',
1266 b'web.api.debugreflect',
1266 default=False,
1267 default=False,
1267 )
1268 )
1268 coreconfigitem(
1269 coreconfigitem(
1269 b'experimental',
1270 b'experimental',
1270 b'worker.wdir-get-thread-safe',
1271 b'worker.wdir-get-thread-safe',
1271 default=False,
1272 default=False,
1272 )
1273 )
1273 coreconfigitem(
1274 coreconfigitem(
1274 b'experimental',
1275 b'experimental',
1275 b'worker.repository-upgrade',
1276 b'worker.repository-upgrade',
1276 default=False,
1277 default=False,
1277 )
1278 )
1278 coreconfigitem(
1279 coreconfigitem(
1279 b'experimental',
1280 b'experimental',
1280 b'xdiff',
1281 b'xdiff',
1281 default=False,
1282 default=False,
1282 )
1283 )
1283 coreconfigitem(
1284 coreconfigitem(
1284 b'extensions',
1285 b'extensions',
1285 b'.*',
1286 b'.*',
1286 default=None,
1287 default=None,
1287 generic=True,
1288 generic=True,
1288 )
1289 )
1289 coreconfigitem(
1290 coreconfigitem(
1290 b'extdata',
1291 b'extdata',
1291 b'.*',
1292 b'.*',
1292 default=None,
1293 default=None,
1293 generic=True,
1294 generic=True,
1294 )
1295 )
1295 coreconfigitem(
1296 coreconfigitem(
1296 b'format',
1297 b'format',
1297 b'bookmarks-in-store',
1298 b'bookmarks-in-store',
1298 default=False,
1299 default=False,
1299 )
1300 )
1300 coreconfigitem(
1301 coreconfigitem(
1301 b'format',
1302 b'format',
1302 b'chunkcachesize',
1303 b'chunkcachesize',
1303 default=None,
1304 default=None,
1304 experimental=True,
1305 experimental=True,
1305 )
1306 )
1306 coreconfigitem(
1307 coreconfigitem(
1307 b'format',
1308 b'format',
1308 b'dotencode',
1309 b'dotencode',
1309 default=True,
1310 default=True,
1310 )
1311 )
1311 coreconfigitem(
1312 coreconfigitem(
1312 b'format',
1313 b'format',
1313 b'generaldelta',
1314 b'generaldelta',
1314 default=False,
1315 default=False,
1315 experimental=True,
1316 experimental=True,
1316 )
1317 )
1317 coreconfigitem(
1318 coreconfigitem(
1318 b'format',
1319 b'format',
1319 b'manifestcachesize',
1320 b'manifestcachesize',
1320 default=None,
1321 default=None,
1321 experimental=True,
1322 experimental=True,
1322 )
1323 )
1323 coreconfigitem(
1324 coreconfigitem(
1324 b'format',
1325 b'format',
1325 b'maxchainlen',
1326 b'maxchainlen',
1326 default=dynamicdefault,
1327 default=dynamicdefault,
1327 experimental=True,
1328 experimental=True,
1328 )
1329 )
1329 coreconfigitem(
1330 coreconfigitem(
1330 b'format',
1331 b'format',
1331 b'obsstore-version',
1332 b'obsstore-version',
1332 default=None,
1333 default=None,
1333 )
1334 )
1334 coreconfigitem(
1335 coreconfigitem(
1335 b'format',
1336 b'format',
1336 b'sparse-revlog',
1337 b'sparse-revlog',
1337 default=True,
1338 default=True,
1338 )
1339 )
1339 coreconfigitem(
1340 coreconfigitem(
1340 b'format',
1341 b'format',
1341 b'revlog-compression',
1342 b'revlog-compression',
1342 default=lambda: [b'zstd', b'zlib'],
1343 default=lambda: [b'zstd', b'zlib'],
1343 alias=[(b'experimental', b'format.compression')],
1344 alias=[(b'experimental', b'format.compression')],
1344 )
1345 )
coreconfigitem(b'format', b'usefncache', default=True)
coreconfigitem(b'format', b'usegeneraldelta', default=True)
coreconfigitem(b'format', b'usestore', default=True)


def _persistent_nodemap_default():
    """compute the `use-persistent-nodemap` default value

    The feature is disabled unless a fast implementation is available.
    """
    from . import policy

    return policy.importrust('revlog') is not None


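# Editorial note (not part of the changeset): the function above is wired in
# as the *default* of the item registered below. Callable defaults are
# resolved lazily, when the value is first looked up, so the Rust probe in
# ``policy.importrust('revlog')`` does not run at import time.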
coreconfigitem(b'format', b'use-persistent-nodemap', default=_persistent_nodemap_default)
coreconfigitem(b'format', b'exp-use-copies-side-data-changeset', default=False, experimental=True)
coreconfigitem(b'format', b'use-share-safe', default=False)
coreconfigitem(b'format', b'internal-phase', default=False, experimental=True)
coreconfigitem(b'fsmonitor', b'warn_when_unused', default=True)
coreconfigitem(b'fsmonitor', b'warn_update_file_count', default=50000)
coreconfigitem(b'fsmonitor', b'warn_update_file_count_rust', default=400000)
coreconfigitem(b'help', br'hidden-command\..*', default=False, generic=True)
coreconfigitem(b'help', br'hidden-topic\..*', default=False, generic=True)
coreconfigitem(b'hooks', b'[^:]*', default=dynamicdefault, generic=True)
coreconfigitem(b'hooks', b'.*:run-with-plain', default=True, generic=True)
coreconfigitem(b'hgweb-paths', b'.*', default=list, generic=True)
coreconfigitem(b'hostfingerprints', b'.*', default=list, generic=True)
coreconfigitem(b'hostsecurity', b'ciphers', default=None)
coreconfigitem(b'hostsecurity', b'minimumprotocol', default=dynamicdefault)
coreconfigitem(b'hostsecurity', b'.*:minimumprotocol$', default=dynamicdefault, generic=True)
coreconfigitem(b'hostsecurity', b'.*:ciphers$', default=dynamicdefault, generic=True)
coreconfigitem(b'hostsecurity', b'.*:fingerprints$', default=list, generic=True)
coreconfigitem(b'hostsecurity', b'.*:verifycertsfile$', default=None, generic=True)

coreconfigitem(b'http_proxy', b'always', default=False)
coreconfigitem(b'http_proxy', b'host', default=None)
coreconfigitem(b'http_proxy', b'no', default=list)
coreconfigitem(b'http_proxy', b'passwd', default=None)
coreconfigitem(b'http_proxy', b'user', default=None)

coreconfigitem(b'http', b'timeout', default=None)

coreconfigitem(b'logtoprocess', b'commandexception', default=None)
coreconfigitem(b'logtoprocess', b'commandfinish', default=None)
coreconfigitem(b'logtoprocess', b'command', default=None)
coreconfigitem(b'logtoprocess', b'develwarn', default=None)
coreconfigitem(b'logtoprocess', b'uiblocked', default=None)
coreconfigitem(b'merge', b'checkunknown', default=b'abort')
coreconfigitem(b'merge', b'checkignored', default=b'abort')
coreconfigitem(b'experimental', b'merge.checkpathconflicts', default=False)
coreconfigitem(b'merge', b'followcopies', default=True)
coreconfigitem(b'merge', b'on-failure', default=b'continue')
coreconfigitem(b'merge', b'preferancestor', default=lambda: [b'*'], experimental=True)
coreconfigitem(b'merge', b'strict-capability-check', default=False)
coreconfigitem(b'merge-tools', b'.*', default=None, generic=True)
coreconfigitem(b'merge-tools', br'.*\.args$', default=b"$local $base $other", generic=True, priority=-1)
coreconfigitem(b'merge-tools', br'.*\.binary$', default=False, generic=True, priority=-1)
coreconfigitem(b'merge-tools', br'.*\.check$', default=list, generic=True, priority=-1)
coreconfigitem(b'merge-tools', br'.*\.checkchanged$', default=False, generic=True, priority=-1)
coreconfigitem(b'merge-tools', br'.*\.executable$', default=dynamicdefault, generic=True, priority=-1)
coreconfigitem(b'merge-tools', br'.*\.fixeol$', default=False, generic=True, priority=-1)
coreconfigitem(b'merge-tools', br'.*\.gui$', default=False, generic=True, priority=-1)
coreconfigitem(b'merge-tools', br'.*\.mergemarkers$', default=b'basic', generic=True, priority=-1)
# default taken from command-templates.mergemarker
coreconfigitem(b'merge-tools', br'.*\.mergemarkertemplate$', default=dynamicdefault, generic=True, priority=-1)
coreconfigitem(b'merge-tools', br'.*\.priority$', default=0, generic=True, priority=-1)
coreconfigitem(b'merge-tools', br'.*\.premerge$', default=dynamicdefault, generic=True, priority=-1)
coreconfigitem(b'merge-tools', br'.*\.symlink$', default=False, generic=True, priority=-1)
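# Editorial illustration (not part of the original file): generic items such
# as ``br'.*\.args$'`` above are matched by regular expression (see
# ``configitem.__init__``, which compiles ``self.name`` when ``generic`` is
# set). The helper below is hypothetical and only sketches that lookup: a
# user key like ``merge-tools.kdiff3.args`` falls back to the generic
# ``.*\.args$`` default when no exact item is registered.
def _example_generic_match(name=b'kdiff3.args'):
    # ``re`` is already imported at the top of this module
    return re.compile(br'.*\.args$').match(name) is not None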
coreconfigitem(b'pager', b'attend-.*', default=dynamicdefault, generic=True)
coreconfigitem(b'pager', b'ignore', default=list)
coreconfigitem(b'pager', b'pager', default=dynamicdefault)
coreconfigitem(b'patch', b'eol', default=b'strict')
coreconfigitem(b'patch', b'fuzz', default=2)
coreconfigitem(b'paths', b'default', default=None)
coreconfigitem(b'paths', b'default-push', default=None)
coreconfigitem(b'paths', b'.*', default=None, generic=True)
coreconfigitem(b'phases', b'checksubrepos', default=b'follow')
coreconfigitem(b'phases', b'new-commit', default=b'draft')
coreconfigitem(b'phases', b'publish', default=True)
coreconfigitem(b'profiling', b'enabled', default=False)
coreconfigitem(b'profiling', b'format', default=b'text')
coreconfigitem(b'profiling', b'freq', default=1000)
coreconfigitem(b'profiling', b'limit', default=30)
coreconfigitem(b'profiling', b'nested', default=0)
coreconfigitem(b'profiling', b'output', default=None)
coreconfigitem(b'profiling', b'showmax', default=0.999)
coreconfigitem(b'profiling', b'showmin', default=dynamicdefault)
coreconfigitem(b'profiling', b'showtime', default=True)
coreconfigitem(b'profiling', b'sort', default=b'inlinetime')
coreconfigitem(b'profiling', b'statformat', default=b'hotpath')
coreconfigitem(b'profiling', b'time-track', default=dynamicdefault)
coreconfigitem(b'profiling', b'type', default=b'stat')
coreconfigitem(b'progress', b'assume-tty', default=False)
coreconfigitem(b'progress', b'changedelay', default=1)
coreconfigitem(b'progress', b'clear-complete', default=True)
coreconfigitem(b'progress', b'debug', default=False)
coreconfigitem(b'progress', b'delay', default=3)
coreconfigitem(b'progress', b'disable', default=False)
coreconfigitem(b'progress', b'estimateinterval', default=60.0)
coreconfigitem(b'progress', b'format', default=lambda: [b'topic', b'bar', b'number', b'estimate'])
coreconfigitem(b'progress', b'refresh', default=0.1)
coreconfigitem(b'progress', b'width', default=dynamicdefault)
coreconfigitem(b'pull', b'confirm', default=False)
coreconfigitem(b'push', b'pushvars.server', default=False)
coreconfigitem(b'rewrite', b'backup-bundle', default=True, alias=[(b'ui', b'history-editing-backup')])
coreconfigitem(b'rewrite', b'update-timestamp', default=False)
coreconfigitem(b'rewrite', b'empty-successor', default=b'skip', experimental=True)
coreconfigitem(b'storage', b'new-repo-backend', default=b'revlogv1', experimental=True)
coreconfigitem(b'storage', b'revlog.optimize-delta-parent-choice', default=True, alias=[(b'format', b'aggressivemergedeltas')])
# experimental as long as rust is experimental (or a C version is implemented)
coreconfigitem(b'storage', b'revlog.persistent-nodemap.mmap', default=True)
# experimental as long as format.use-persistent-nodemap is.
coreconfigitem(b'storage', b'revlog.persistent-nodemap.slow-path', default=b"abort")

coreconfigitem(b'storage', b'revlog.reuse-external-delta', default=True)
coreconfigitem(b'storage', b'revlog.reuse-external-delta-parent', default=None)
coreconfigitem(b'storage', b'revlog.zlib.level', default=None)
coreconfigitem(b'storage', b'revlog.zstd.level', default=None)
coreconfigitem(b'server', b'bookmarks-pushkey-compat', default=True)
coreconfigitem(b'server', b'bundle1', default=True)
coreconfigitem(b'server', b'bundle1gd', default=None)
coreconfigitem(b'server', b'bundle1.pull', default=None)
coreconfigitem(b'server', b'bundle1gd.pull', default=None)
coreconfigitem(b'server', b'bundle1.push', default=None)
coreconfigitem(b'server', b'bundle1gd.push', default=None)
coreconfigitem(b'server', b'bundle2.stream', default=True, alias=[(b'experimental', b'bundle2.stream')])
coreconfigitem(b'server', b'compressionengines', default=list)
coreconfigitem(b'server', b'concurrent-push-mode', default=b'check-related')
coreconfigitem(b'server', b'disablefullbundle', default=False)
coreconfigitem(b'server', b'maxhttpheaderlen', default=1024)
coreconfigitem(b'server', b'pullbundle', default=False)
coreconfigitem(b'server', b'preferuncompressed', default=False)
coreconfigitem(b'server', b'streamunbundle', default=False)
coreconfigitem(b'server', b'uncompressed', default=True)
coreconfigitem(b'server', b'uncompressedallowsecret', default=False)
coreconfigitem(b'server', b'view', default=b'served')
coreconfigitem(b'server', b'validate', default=False)
coreconfigitem(b'server', b'zliblevel', default=-1)
coreconfigitem(b'server', b'zstdlevel', default=3)
coreconfigitem(b'share', b'pool', default=None)
coreconfigitem(b'share', b'poolnaming', default=b'identity')
coreconfigitem(b'share', b'safe-mismatch.source-not-safe', default=b'abort')
coreconfigitem(b'share', b'safe-mismatch.source-safe', default=b'abort')
coreconfigitem(b'share', b'safe-mismatch.source-not-safe.warn', default=True)
coreconfigitem(b'share', b'safe-mismatch.source-safe.warn', default=True)
coreconfigitem(b'shelve', b'maxbackups', default=10)
coreconfigitem(b'smtp', b'host', default=None)
coreconfigitem(b'smtp', b'local_hostname', default=None)
coreconfigitem(b'smtp', b'password', default=None)
coreconfigitem(b'smtp', b'port', default=dynamicdefault)
coreconfigitem(b'smtp', b'tls', default=b'none')
coreconfigitem(b'smtp', b'username', default=None)
coreconfigitem(b'sparse', b'missingwarning', default=True, experimental=True)
coreconfigitem(b'subrepos', b'allowed', default=dynamicdefault)  # to make backporting simpler
coreconfigitem(b'subrepos', b'hg:allowed', default=dynamicdefault)
coreconfigitem(b'subrepos', b'git:allowed', default=dynamicdefault)
coreconfigitem(b'subrepos', b'svn:allowed', default=dynamicdefault)
coreconfigitem(b'templates', b'.*', default=None, generic=True)
coreconfigitem(b'templateconfig', b'.*', default=dynamicdefault, generic=True)
coreconfigitem(b'trusted', b'groups', default=list)
coreconfigitem(b'trusted', b'users', default=list)
coreconfigitem(b'ui', b'_usedassubrepo', default=False)
coreconfigitem(b'ui', b'allowemptycommit', default=False)
coreconfigitem(b'ui', b'archivemeta', default=True)
coreconfigitem(b'ui', b'askusername', default=False)
coreconfigitem(b'ui', b'available-memory', default=None)

coreconfigitem(b'ui', b'clonebundlefallback', default=False)
coreconfigitem(b'ui', b'clonebundleprefers', default=list)
coreconfigitem(b'ui', b'clonebundles', default=True)
coreconfigitem(b'ui', b'color', default=b'auto')
coreconfigitem(b'ui', b'commitsubrepos', default=False)
coreconfigitem(b'ui', b'debug', default=False)
coreconfigitem(b'ui', b'debugger', default=None)
coreconfigitem(b'ui', b'editor', default=dynamicdefault)
coreconfigitem(b'ui', b'detailed-exit-code', default=False, experimental=True)
coreconfigitem(b'ui', b'fallbackencoding', default=None)
coreconfigitem(b'ui', b'forcecwd', default=None)
coreconfigitem(b'ui', b'forcemerge', default=None)
coreconfigitem(b'ui', b'formatdebug', default=False)
coreconfigitem(b'ui', b'formatjson', default=False)
coreconfigitem(b'ui', b'formatted', default=None)
coreconfigitem(b'ui', b'interactive', default=None)
coreconfigitem(b'ui', b'interface', default=None)
coreconfigitem(b'ui', b'interface.chunkselector', default=None)
coreconfigitem(b'ui', b'large-file-limit', default=10000000)
coreconfigitem(b'ui', b'logblockedtimes', default=False)
coreconfigitem(b'ui', b'merge', default=None)
coreconfigitem(b'ui', b'mergemarkers', default=b'basic')
coreconfigitem(b'ui', b'message-output', default=b'stdio')
coreconfigitem(b'ui', b'nontty', default=False)
coreconfigitem(b'ui', b'origbackuppath', default=None)
coreconfigitem(b'ui', b'paginate', default=True)
coreconfigitem(b'ui', b'patch', default=None)
coreconfigitem(b'ui', b'portablefilenames', default=b'warn')
coreconfigitem(b'ui', b'promptecho', default=False)
coreconfigitem(b'ui', b'quiet', default=False)
coreconfigitem(b'ui', b'quietbookmarkmove', default=False)
coreconfigitem(b'ui', b'relative-paths', default=b'legacy')
coreconfigitem(b'ui', b'remotecmd', default=b'hg')
coreconfigitem(b'ui', b'report_untrusted', default=True)
coreconfigitem(b'ui', b'rollback', default=True)
coreconfigitem(b'ui', b'signal-safe-lock', default=True)
coreconfigitem(b'ui', b'slash', default=False)
coreconfigitem(b'ui', b'ssh', default=b'ssh')
coreconfigitem(b'ui', b'ssherrorhint', default=None)
coreconfigitem(b'ui', b'statuscopies', default=False)
coreconfigitem(b'ui', b'strict', default=False)
coreconfigitem(b'ui', b'style', default=b'')
coreconfigitem(b'ui', b'supportcontact', default=None)
coreconfigitem(b'ui', b'textwidth', default=78)
coreconfigitem(b'ui', b'timeout', default=b'600')
coreconfigitem(b'ui', b'timeout.warn', default=0)
coreconfigitem(b'ui', b'timestamp-output', default=False)
coreconfigitem(b'ui', b'traceback', default=False)
coreconfigitem(b'ui', b'tweakdefaults', default=False)
coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
coreconfigitem(b'ui', b'verbose', default=False)
coreconfigitem(b'verify', b'skipflags', default=None)
coreconfigitem(b'web', b'allowbz2', default=False)
coreconfigitem(b'web', b'allowgz', default=False)
coreconfigitem(b'web', b'allow-pull', alias=[(b'web', b'allowpull')], default=True)
coreconfigitem(b'web', b'allow-push', alias=[(b'web', b'allow_push')], default=list)
coreconfigitem(b'web', b'allowzip', default=False)
coreconfigitem(b'web', b'archivesubrepos', default=False)
coreconfigitem(b'web', b'cache', default=True)
coreconfigitem(b'web', b'comparisoncontext', default=5)
coreconfigitem(b'web', b'contact', default=None)
coreconfigitem(b'web', b'deny_push', default=list)
coreconfigitem(b'web', b'guessmime', default=False)
coreconfigitem(b'web', b'hidden', default=False)
coreconfigitem(b'web', b'labels', default=list)
coreconfigitem(b'web', b'logoimg', default=b'hglogo.png')
coreconfigitem(b'web', b'logourl', default=b'https://mercurial-scm.org/')
coreconfigitem(b'web', b'accesslog', default=b'-')
coreconfigitem(b'web', b'address', default=b'')
coreconfigitem(b'web', b'allow-archive', alias=[(b'web', b'allow_archive')], default=list)
coreconfigitem(b'web', b'allow_read', default=list)
coreconfigitem(b'web', b'baseurl', default=None)
coreconfigitem(b'web', b'cacerts', default=None)
coreconfigitem(b'web', b'certificate', default=None)
coreconfigitem(b'web', b'collapse', default=False)
coreconfigitem(b'web', b'csp', default=None)
coreconfigitem(b'web', b'deny_read', default=list)
coreconfigitem(b'web', b'descend', default=True)
coreconfigitem(b'web', b'description', default=b"")
coreconfigitem(b'web', b'encoding', default=lambda: encoding.encoding)
coreconfigitem(b'web', b'errorlog', default=b'-')
coreconfigitem(b'web', b'ipv6', default=False)
coreconfigitem(b'web', b'maxchanges', default=10)
coreconfigitem(b'web', b'maxfiles', default=10)
coreconfigitem(b'web', b'maxshortchanges', default=60)
coreconfigitem(b'web', b'motd', default=b'')
coreconfigitem(b'web', b'name', default=dynamicdefault)
coreconfigitem(b'web', b'port', default=8000)
coreconfigitem(b'web', b'prefix', default=b'')
coreconfigitem(b'web', b'push_ssl', default=True)
coreconfigitem(b'web', b'refreshinterval', default=20)
coreconfigitem(b'web', b'server-header', default=None)
coreconfigitem(b'web', b'static', default=None)
coreconfigitem(b'web', b'staticurl', default=None)
coreconfigitem(b'web', b'stripes', default=1)
coreconfigitem(b'web', b'style', default=b'paper')
coreconfigitem(b'web', b'templates', default=None)
coreconfigitem(b'web', b'view', default=b'served', experimental=True)
coreconfigitem(b'worker', b'backgroundclose', default=dynamicdefault)
# Windows defaults to a limit of 512 open files; a buffer of 128 should give
# us enough headway (512 - 128 = 384).
coreconfigitem(b'worker', b'backgroundclosemaxqueue', default=384)
coreconfigitem(b'worker', b'backgroundcloseminfilecount', default=2048)
coreconfigitem(b'worker', b'backgroundclosethreadcount', default=4)
coreconfigitem(b'worker', b'enabled', default=True)
coreconfigitem(b'worker', b'numcpus', default=None)

# Rebase-related configuration moved to core because other extensions do
# strange things with it. For example, shelve imports the rebase extension to
# reuse some of its bits without formally loading it.
coreconfigitem(b'commands', b'rebase.requiredest', default=False)
coreconfigitem(b'experimental', b'rebaseskipobsolete', default=True)
coreconfigitem(b'rebase', b'singletransaction', default=False)
coreconfigitem(b'rebase', b'experimental.inmemory', default=False)
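# Editorial illustration (not part of the changeset): a minimal sketch of how
# an extension might consult one of the rebase items registered above. The
# function and its ``repo`` argument are hypothetical; ``ui.configbool`` is
# the typed accessor that pairs with these declarations and returns the
# declared default (False) unless the user set ``[rebase] singletransaction``.
def _example_read_rebase_config(repo):
    return repo.ui.configbool(b'rebase', b'singletransaction')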
@@ -1,3769 +1,3771 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import functools
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
    wireprototypes,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    concurrency_checker as revlogchecker,
    constants as revlogconst,
    sidedata as sidedatamod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to the unfiltered __dict__, since the filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


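# Editorial illustration (not part of the changeset): these classes are used
# as property decorators on the repository class. A hypothetical sketch of
# the pattern, assuming a ``localrepository`` class and the ``bookmarks``
# file under ``.hg``:
#
#     class localrepository(object):
#         @repofilecache(b'bookmarks')
#         def _bookmarks(self):
#             return bookmarks.bmstore(self)
#
# The cached value is recomputed only when the stat information of the file
# changes; ``join()`` is what resolves the watched path for each subclass.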
class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


179 class unfilteredpropertycache(util.propertycache):
179 class unfilteredpropertycache(util.propertycache):
180 """propertycache that apply to unfiltered repo only"""
180 """propertycache that apply to unfiltered repo only"""
181
181
182 def __get__(self, repo, type=None):
182 def __get__(self, repo, type=None):
183 unfi = repo.unfiltered()
183 unfi = repo.unfiltered()
184 if unfi is repo:
184 if unfi is repo:
185 return super(unfilteredpropertycache, self).__get__(unfi)
185 return super(unfilteredpropertycache, self).__get__(unfi)
186 return getattr(unfi, self.name)
186 return getattr(unfi, self.name)
187
187
188
188
189 class filteredpropertycache(util.propertycache):
189 class filteredpropertycache(util.propertycache):
190 """propertycache that must take filtering in account"""
190 """propertycache that must take filtering in account"""
191
191
192 def cachevalue(self, obj, value):
192 def cachevalue(self, obj, value):
193 object.__setattr__(obj, self.name, value)
193 object.__setattr__(obj, self.name, value)
194
194
195
195
196 def hasunfilteredcache(repo, name):
196 def hasunfilteredcache(repo, name):
197 """check if a repo has an unfilteredpropertycache value for <name>"""
197 """check if a repo has an unfilteredpropertycache value for <name>"""
198 return name in vars(repo.unfiltered())
198 return name in vars(repo.unfiltered())
199
199
200
200
201 def unfilteredmethod(orig):
201 def unfilteredmethod(orig):
202 """decorate method that always need to be run on unfiltered version"""
202 """decorate method that always need to be run on unfiltered version"""
203
203
204 @functools.wraps(orig)
204 @functools.wraps(orig)
205 def wrapper(repo, *args, **kwargs):
205 def wrapper(repo, *args, **kwargs):
206 return orig(repo.unfiltered(), *args, **kwargs)
206 return orig(repo.unfiltered(), *args, **kwargs)
207
207
208 return wrapper
208 return wrapper
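
# Example (illustrative): a method that mutates storage must see every
# revision, so it is declared to always run on the unfiltered repo:
#
#     @unfilteredmethod
#     def destroying(self):
#         ...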


moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True
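
# Minimal usage sketch (``peer`` is a hypothetical peer instance, for
# illustration only). Because commands run locally, callcommand() hands
# back an already-resolved future:
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'heads', {})
#         heads = f.result()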


@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui

        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
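
# A sketch of how an extension might register such a function (hypothetical
# extension code, for illustration only):
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-myextension-requirement')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)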


def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is a vfs pointing at .hg/ of the current repo (the shared one)
    requirements is a set of requirements of the current repo (the shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the requires file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress ENOENT if it is raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(vfs.read(b'requires').splitlines())
    except IOError as e:
        if not (allowmissing and e.errno == errno.ENOENT):
            raise
        requirements = set()
    return requirements
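
# The requires file is a plain newline-delimited list of tokens; a typical
# modern repository might contain, e.g.:
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     sparserevlog
#     store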


def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that a store requirement
    # is present. We never write SHARESAFE_REQUIREMENT for a repo if the store
    # is not present; refer to checkrequirementscompat() for that
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
    hint = _(b"see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:

        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced a race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )
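
# The class construction above is ordinary Python metaprogramming:
# ``type(name, bases, namespace)`` builds a new class whose MRO follows
# the order of ``bases``. A toy sketch of the same idea (illustrative
# only, unrelated to the real repository interfaces):
#
#     class storage(object):
#         def revcount(self):
#             return 42
#
#     class caching(object):
#         pass
#
#     derived = type('derivedrepo:example', (storage, caching), {})
#     assert derived().revcount() == 42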


def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is a vfs object pointing to the source repo if the current
    one is a shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from the shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret
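
# A sketch of an extension wrapping this hook to read an extra,
# hypothetical config file (illustrative only):
#
#     from mercurial import extensions, localrepo
#
#     def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **kwargs):
#         ret = orig(ui, wdirvfs, hgvfs, requirements, *args, **kwargs)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             ret = True
#         except IOError:
#             pass
#         return ret
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)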


def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to lists of extensions to load automatically when
    # the requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported
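
# For example (illustrative): when the zstd engine is available and
# provides a revlog header, the set gains both b'exp-compression-zstd'
# and b'revlog-compression-zstd' on top of the baseline requirements.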


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns nothing on success.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extensions to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options
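
# Sketch of what the resolved options may look like for a typical modern
# repository (illustrative keys and values only):
#
#     {
#         b'revlogv1': True,
#         b'generaldelta': True,
#         b'sparse-revlog': True,
#         b'copies-storage': b'extra',
#         ...
#     }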
1010
1012
1011
1013
1012 def resolverevlogstorevfsoptions(ui, requirements, features):
1014 def resolverevlogstorevfsoptions(ui, requirements, features):
1013 """Resolve opener options specific to revlogs."""
1015 """Resolve opener options specific to revlogs."""
1014
1016
1015 options = {}
1017 options = {}
1016 options[b'flagprocessors'] = {}
    options[b'flagprocessors'] = {}

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

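    # A minimal illustration of the parsing above: ``split(b'-', 2)`` splits
    # on the first two dashes only, so engine names that themselves contain
    # dashes survive intact:
    #
    #   >>> b'revlog-compression-zstd'.split(b'-', 2)[2]
    #   b'zstd'
    #   >>> b'exp-compression-some-engine'.split(b'-', 2)[2]
    #   b'some-engine'
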
    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

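    # The same bounds-check pattern generalizes to any levelled compression
    # engine; a minimal sketch, assuming a hypothetical engine that accepts
    # levels 1..12 (the config key below is invented for illustration):
    #
    #   level = ui.configint(b'storage', b'revlog.example.level')
    #   if level is not None and not (1 <= level <= 12):
    #       raise error.Abort(_(b'invalid example level: %d') % level)
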
    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options


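# A minimal usage sketch (hypothetical caller, not part of this module): the
# options mapping built above is what the store opener consults when it
# instantiates revlogs, e.g.:
#
#   options = resolve_store_options(ui, requirements, features)
#   if options.get(b'revlogv2'):
#       ...  # open revlogs in the v2 format
#   zlib_level = options.get(b'zlib.level')  # None means engine default
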
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


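# ``makefilestorage`` dispatches purely on the requirement set, so the choice
# can be exercised directly; a small sketch under the definitions above:
#
#   narrow = {requirementsmod.NARROW_REQUIREMENT}
#   assert makefilestorage(narrow, set()) is revlognarrowfilestorage
#   assert makefilestorage(set(), set()) is revlogfilestorage
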
# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]


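# A simplified sketch (approximate; the real consumption happens in
# ``makelocalrepository()``, outside this excerpt) of how these pairs are
# used: each factory contributes a base class, and the final repository type
# is derived from all of them, which is what lets extensions wrap the
# module-level functions:
#
#   bases = [fn()(requirements=requirements, features=features)
#            for _iface, fn in REPO_INTERFACES]
#   repo_cls = type('derivedrepo', tuple(bases), {})
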
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

    # List of prefixes for files which can be written without 'wlock'.
    # Extensions should extend this list when needed.
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that the ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data was found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator).
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

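    # A standalone sketch of the ward pattern used above: wrap a callable,
    # hold only a weak reference to the owner so the wrapper does not keep
    # the repository alive, and bail out cheaply when the owner is gone:
    #
    #   rref = weakref.ref(owner)
    #
    #   def ward(*args, **kwargs):
    #       ret = origfunc(*args, **kwargs)
    #       if rref() is None:
    #           return ret  # owner collected; nothing left to check
    #       ...  # perform the extra (develwarn) checks here
    #       return ret
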
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with a reference
    # cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

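    # The loop above performs a longest-prefix walk: it tries the full
    # candidate path first and pops one component per iteration. Sketch:
    #
    #   parts = [b'a', b'b', b'c']
    #   while parts:
    #       prefix = b'/'.join(parts)  # b'a/b/c', then b'a/b', then b'a'
    #       parts.pop()
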
    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

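    # ``repoview.newtype`` derives the view class dynamically; conceptually
    # (caching and details omitted) it behaves like this sketch:
    #
    #   def newtype(base):
    #       class filteredrepo(repoview.repoview, base):
    #           pass
    #       return filteredrepo
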
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race free consistent read.
        # For this purpose, data read from disc before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light"; the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would
        # be refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not enough,
        # because at the time we build the content for `_bookmarks` in (4),
        # the changelog file has already diverged from the content used for
        # loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still
        # happen without the lock (with a narrower window), but it would no
        # longer go undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` is captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

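    # Condensed sketch of the schedule described in ``_bookmarks`` above
    # (helper names hypothetical; the real bookkeeping lives in the
    # ``mixedrepostorecache`` decorator):
    #
    #   stat_books = capture_cachestat(b'bookmarks')   # steps (1)+(2)
    #   repo._refreshchangelog()                       # step (3)
    #   stat_cl = capture_cachestat(b'00changelog.i')  # step (4)
    #   value = bookmarks.bmstore(repo)                # step (5), then cache
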
    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

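    # The ``del`` above works because filecache-style properties store their
    # computed value on the instance; deleting the attribute forces a fresh
    # read on next access. Minimal sketch:
    #
    #   if 'changelog' in vars(repo):
    #       del repo.changelog  # next ``repo.changelog`` reloads from disk
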
    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid a race; see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(
            txnutil.mayhavepending(self.root),
            concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'),
        )

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also add fast-path access to the working copy parents; however,
        # only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

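    # The mapping built above is deliberately keyed by both identifiers, so
    # ``__getitem__`` below resolves ``repo[rev]``, ``repo[node]`` and
    # ``repo[b'.']`` with a single dict lookup. Sketch:
    #
    #   quick = repo._quick_access_changeid
    #   assert quick[rev] == quick[node] == (rev, node)
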
    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

1893 def __nonzero__(self):
1895 def __nonzero__(self):
1894 return True
1896 return True
1895
1897
1896 __bool__ = __nonzero__
1898 __bool__ = __nonzero__
1897
1899
1898 def __len__(self):
1900 def __len__(self):
1899 # no need to pay the cost of repoview.changelog
1901 # no need to pay the cost of repoview.changelog
1900 unfi = self.unfiltered()
1902 unfi = self.unfiltered()
1901 return len(unfi.changelog)
1903 return len(unfi.changelog)
1902
1904
1903 def __iter__(self):
1905 def __iter__(self):
1904 return iter(self.changelog)
1906 return iter(self.changelog)
1905
1907
1906 def revs(self, expr, *args):
1908 def revs(self, expr, *args):
1907 """Find revisions matching a revset.
1909 """Find revisions matching a revset.
1908
1910
1909 The revset is specified as a string ``expr`` that may contain
1911 The revset is specified as a string ``expr`` that may contain
1910 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1912 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1911
1913
1912 Revset aliases from the configuration are not expanded. To expand
1914 Revset aliases from the configuration are not expanded. To expand
1913 user aliases, consider calling ``scmutil.revrange()`` or
1915 user aliases, consider calling ``scmutil.revrange()`` or
1914 ``repo.anyrevs([expr], user=True)``.
1916 ``repo.anyrevs([expr], user=True)``.
1915
1917
1916 Returns a smartset.abstractsmartset, which is a list-like interface
1918 Returns a smartset.abstractsmartset, which is a list-like interface
1917 that contains integer revisions.
1919 that contains integer revisions.
1918 """
1920 """
1919 tree = revsetlang.spectree(expr, *args)
1921 tree = revsetlang.spectree(expr, *args)
1920 return revset.makematcher(tree)(self)
1922 return revset.makematcher(tree)(self)
1921
1923
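    # Usage sketch for ``revs()`` (illustrative only; assumes a loaded
    # ``repo`` object and uses ``%ld``, the list-of-revs specifier from
    # ``revsetlang.formatspec``):
    #
    #   for rev in repo.revs(b'%ld and merge()', [5, 6, 7]):
    #       ...  # integer revisions, with the current filter applied
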
    def set(self, expr, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

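    # Usage sketch for ``anyrevs()`` (illustrative only; the ``mine`` alias
    # is hypothetical): expand user revset aliases while overriding one of
    # them with a local definition.
    #
    #   revs = repo.anyrevs(
    #       [b'mine()'], user=True, localalias={b'mine': b'user("alice")'}
    #   )
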
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

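    # Usage sketch for ``hook()`` (illustrative only; ``myhook`` is a
    # hypothetical hook name): keyword arguments are exposed to shell hooks
    # as ``HG_*`` environment variables.
    #
    #   repo.hook(b'myhook', throw=False, node=hex(node))
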
    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local' : a local tag
        'global' : a global tag
        None : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

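    # Usage sketch for the tag accessors (illustrative only; ``b'1.0'`` is a
    # hypothetical tag name):
    #
    #   node = repo.tags()[b'tip']     # tag name -> node
    #   names = repo.nodetags(node)    # node -> sorted list of tag names
    #   kind = repo.tagtype(b'1.0')    # b'global', b'local', or None
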
    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

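    # Usage sketch for ``branchmap()`` (illustrative only):
    #
    #   bm = repo.branchmap()
    #   tip_node = bm.branchtip(b'default')  # raises KeyError if missing;
    #                                        # see ``branchtip()`` below
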
    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

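    # Usage sketch for ``known()`` (illustrative only): maps candidate nodes
    # to booleans, yielding False for unknown or filtered changesets.
    #
    #   flags = repo.known([node1, node2])  # e.g. [True, False]
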
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

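    # Usage sketch for ``filectx()`` (illustrative only; ``b'README'`` is a
    # hypothetical path): read a file as of the working directory's first
    # parent.
    #
    #   data = repo.filectx(b'README', changeid=b'.').data()
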
    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

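    # The patterns loaded above come from the ``[encode]`` and ``[decode]``
    # hgrc sections. An illustrative configuration (adapted from the hgrc
    # documentation, not from this repository):
    #
    #   [encode]
    #   # uncompress gzip files on checkin to improve delta compression
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   # recompress gzip files when writing them to the working directory
    #   *.gz = gzip
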
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

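    # Usage sketch for ``wwrite()`` (illustrative only): ``flags`` is a bytes
    # string where b'l' means "write as a symlink" and b'x' means "set the
    # executable bit".
    #
    #   n = repo.wwrite(b'hello.txt', b'hello\n', b'')    # regular file
    #   n = repo.wwrite(b'run.sh', b'#!/bin/sh\n', b'x')  # executable file
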
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track their movement from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by default.
        # The flag will be removed when we are happy with the performance
        # impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tag were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            repo = reporef()
            assert repo is not None  # help pytype
            repo.hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

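    # Usage sketch for ``transaction()`` (illustrative only): a transaction
    # requires holding the store lock, and nests inside any transaction that
    # is already running.
    #
    #   with repo.lock():
    #       with repo.transaction(b'my-operation') as tr:
    #           ...  # mutate the store; rolled back on exception
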
2544 def _journalfiles(self):
2546 def _journalfiles(self):
2545 return (
2547 return (
2546 (self.svfs, b'journal'),
2548 (self.svfs, b'journal'),
2547 (self.svfs, b'journal.narrowspec'),
2549 (self.svfs, b'journal.narrowspec'),
2548 (self.vfs, b'journal.narrowspec.dirstate'),
2550 (self.vfs, b'journal.narrowspec.dirstate'),
2549 (self.vfs, b'journal.dirstate'),
2551 (self.vfs, b'journal.dirstate'),
2550 (self.vfs, b'journal.branch'),
2552 (self.vfs, b'journal.branch'),
2551 (self.vfs, b'journal.desc'),
2553 (self.vfs, b'journal.desc'),
2552 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2554 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2553 (self.svfs, b'journal.phaseroots'),
2555 (self.svfs, b'journal.phaseroots'),
2554 )
2556 )
2555
2557
2556 def undofiles(self):
2558 def undofiles(self):
2557 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2559 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2558
2560
2559 @unfilteredmethod
2561 @unfilteredmethod
2560 def _writejournal(self, desc):
2562 def _writejournal(self, desc):
2561 self.dirstate.savebackup(None, b'journal.dirstate')
2563 self.dirstate.savebackup(None, b'journal.dirstate')
2562 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2564 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2563 narrowspec.savebackup(self, b'journal.narrowspec')
2565 narrowspec.savebackup(self, b'journal.narrowspec')
2564 self.vfs.write(
2566 self.vfs.write(
2565 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2567 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2566 )
2568 )
2567 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2569 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2568 bookmarksvfs = bookmarks.bookmarksvfs(self)
2570 bookmarksvfs = bookmarks.bookmarksvfs(self)
2569 bookmarksvfs.write(
2571 bookmarksvfs.write(
2570 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2572 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2571 )
2573 )
2572 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2574 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2573
2575
2574 def recover(self):
2576 def recover(self):
2575 with self.lock():
2577 with self.lock():
2576 if self.svfs.exists(b"journal"):
2578 if self.svfs.exists(b"journal"):
2577 self.ui.status(_(b"rolling back interrupted transaction\n"))
2579 self.ui.status(_(b"rolling back interrupted transaction\n"))
2578 vfsmap = {
2580 vfsmap = {
2579 b'': self.svfs,
2581 b'': self.svfs,
2580 b'plain': self.vfs,
2582 b'plain': self.vfs,
2581 }
2583 }
2582 transaction.rollback(
2584 transaction.rollback(
2583 self.svfs,
2585 self.svfs,
2584 vfsmap,
2586 vfsmap,
2585 b"journal",
2587 b"journal",
2586 self.ui.warn,
2588 self.ui.warn,
2587 checkambigfiles=_cachedfiles,
2589 checkambigfiles=_cachedfiles,
2588 )
2590 )
2589 self.invalidate()
2591 self.invalidate()
2590 return True
2592 return True
2591 else:
2593 else:
2592 self.ui.warn(_(b"no interrupted transaction available\n"))
2594 self.ui.warn(_(b"no interrupted transaction available\n"))
2593 return False
2595 return False
2594
2596
2595 def rollback(self, dryrun=False, force=False):
2597 def rollback(self, dryrun=False, force=False):
2596 wlock = lock = dsguard = None
2598 wlock = lock = dsguard = None
2597 try:
2599 try:
2598 wlock = self.wlock()
2600 wlock = self.wlock()
2599 lock = self.lock()
2601 lock = self.lock()
2600 if self.svfs.exists(b"undo"):
2602 if self.svfs.exists(b"undo"):
2601 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2603 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2602
2604
2603 return self._rollback(dryrun, force, dsguard)
2605 return self._rollback(dryrun, force, dsguard)
2604 else:
2606 else:
2605 self.ui.warn(_(b"no rollback information available\n"))
2607 self.ui.warn(_(b"no rollback information available\n"))
2606 return 1
2608 return 1
2607 finally:
2609 finally:
2608 release(dsguard, lock, wlock)
2610 release(dsguard, lock, wlock)
2609
2611
2610 @unfilteredmethod # Until we get smarter cache management
2612 @unfilteredmethod # Until we get smarter cache management
2611 def _rollback(self, dryrun, force, dsguard):
2613 def _rollback(self, dryrun, force, dsguard):
2612 ui = self.ui
2614 ui = self.ui
2613 try:
2615 try:
2614 args = self.vfs.read(b'undo.desc').splitlines()
2616 args = self.vfs.read(b'undo.desc').splitlines()
2615 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2617 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2616 if len(args) >= 3:
2618 if len(args) >= 3:
2617 detail = args[2]
2619 detail = args[2]
2618 oldtip = oldlen - 1
2620 oldtip = oldlen - 1
2619
2621
2620 if detail and ui.verbose:
2622 if detail and ui.verbose:
2621 msg = _(
2623 msg = _(
2622 b'repository tip rolled back to revision %d'
2624 b'repository tip rolled back to revision %d'
2623 b' (undo %s: %s)\n'
2625 b' (undo %s: %s)\n'
2624 ) % (oldtip, desc, detail)
2626 ) % (oldtip, desc, detail)
2625 else:
2627 else:
2626 msg = _(
2628 msg = _(
2627 b'repository tip rolled back to revision %d (undo %s)\n'
2629 b'repository tip rolled back to revision %d (undo %s)\n'
2628 ) % (oldtip, desc)
2630 ) % (oldtip, desc)
2629 except IOError:
2631 except IOError:
2630 msg = _(b'rolling back unknown transaction\n')
2632 msg = _(b'rolling back unknown transaction\n')
2631 desc = None
2633 desc = None
2632
2634
2633 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2635 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2634 raise error.Abort(
2636 raise error.Abort(
2635 _(
2637 _(
2636 b'rollback of last commit while not checked out '
2638 b'rollback of last commit while not checked out '
2637 b'may lose data'
2639 b'may lose data'
2638 ),
2640 ),
2639 hint=_(b'use -f to force'),
2641 hint=_(b'use -f to force'),
2640 )
2642 )
2641
2643
2642 ui.status(msg)
2644 ui.status(msg)
2643 if dryrun:
2645 if dryrun:
2644 return 0
2646 return 0
2645
2647
2646 parents = self.dirstate.parents()
2648 parents = self.dirstate.parents()
2647 self.destroying()
2649 self.destroying()
2648 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2650 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2649 transaction.rollback(
2651 transaction.rollback(
2650 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2652 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2651 )
2653 )
2652 bookmarksvfs = bookmarks.bookmarksvfs(self)
2654 bookmarksvfs = bookmarks.bookmarksvfs(self)
2653 if bookmarksvfs.exists(b'undo.bookmarks'):
2655 if bookmarksvfs.exists(b'undo.bookmarks'):
2654 bookmarksvfs.rename(
2656 bookmarksvfs.rename(
2655 b'undo.bookmarks', b'bookmarks', checkambig=True
2657 b'undo.bookmarks', b'bookmarks', checkambig=True
2656 )
2658 )
2657 if self.svfs.exists(b'undo.phaseroots'):
2659 if self.svfs.exists(b'undo.phaseroots'):
2658 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2660 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2659 self.invalidate()
2661 self.invalidate()
2660
2662
2661 has_node = self.changelog.index.has_node
2663 has_node = self.changelog.index.has_node
2662 parentgone = any(not has_node(p) for p in parents)
2664 parentgone = any(not has_node(p) for p in parents)
2663 if parentgone:
2665 if parentgone:
2664 # prevent dirstateguard from overwriting already restored one
2666 # prevent dirstateguard from overwriting already restored one
2665 dsguard.close()
2667 dsguard.close()
2666
2668
2667 narrowspec.restorebackup(self, b'undo.narrowspec')
2669 narrowspec.restorebackup(self, b'undo.narrowspec')
2668 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2670 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2669 self.dirstate.restorebackup(None, b'undo.dirstate')
2671 self.dirstate.restorebackup(None, b'undo.dirstate')
2670 try:
2672 try:
2671 branch = self.vfs.read(b'undo.branch')
2673 branch = self.vfs.read(b'undo.branch')
2672 self.dirstate.setbranch(encoding.tolocal(branch))
2674 self.dirstate.setbranch(encoding.tolocal(branch))
2673 except IOError:
2675 except IOError:
2674 ui.warn(
2676 ui.warn(
2675 _(
2677 _(
2676 b'named branch could not be reset: '
2678 b'named branch could not be reset: '
2677 b'current branch is still \'%s\'\n'
2679 b'current branch is still \'%s\'\n'
2678 )
2680 )
2679 % self.dirstate.branch()
2681 % self.dirstate.branch()
2680 )
2682 )
2681
2683
2682 parents = tuple([p.rev() for p in self[None].parents()])
2684 parents = tuple([p.rev() for p in self[None].parents()])
2683 if len(parents) > 1:
2685 if len(parents) > 1:
2684 ui.status(
2686 ui.status(
2685 _(
2687 _(
2686 b'working directory now based on '
2688 b'working directory now based on '
2687 b'revisions %d and %d\n'
2689 b'revisions %d and %d\n'
2688 )
2690 )
2689 % parents
2691 % parents
2690 )
2692 )
2691 else:
2693 else:
2692 ui.status(
2694 ui.status(
2693 _(b'working directory now based on revision %d\n') % parents
2695 _(b'working directory now based on revision %d\n') % parents
2694 )
2696 )
2695 mergestatemod.mergestate.clean(self)
2697 mergestatemod.mergestate.clean(self)
2696
2698
2697 # TODO: if we know which new heads may result from this rollback, pass
2699 # TODO: if we know which new heads may result from this rollback, pass
2698 # them to destroy(), which will prevent the branchhead cache from being
2700 # them to destroy(), which will prevent the branchhead cache from being
2699 # invalidated.
2701 # invalidated.
2700 self.destroyed()
2702 self.destroyed()
2701 return 0
2703 return 0
2702
2704
2703 def _buildcacheupdater(self, newtransaction):
2705 def _buildcacheupdater(self, newtransaction):
2704 """called during transaction to build the callback updating cache
2706 """called during transaction to build the callback updating cache
2705
2707
2706 Lives on the repository to help extension who might want to augment
2708 Lives on the repository to help extension who might want to augment
2707 this logic. For this purpose, the created transaction is passed to the
2709 this logic. For this purpose, the created transaction is passed to the
2708 method.
2710 method.
2709 """
2711 """
2710 # we must avoid cyclic reference between repo and transaction.
2712 # we must avoid cyclic reference between repo and transaction.
2711 reporef = weakref.ref(self)
2713 reporef = weakref.ref(self)
2712
2714
2713 def updater(tr):
2715 def updater(tr):
2714 repo = reporef()
2716 repo = reporef()
2715 assert repo is not None # help pytype
2717 assert repo is not None # help pytype
2716 repo.updatecaches(tr)
2718 repo.updatecaches(tr)
2717
2719
2718 return updater
2720 return updater
2719
2721
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this case
        the cache warming is done after a clone and some of the slower caches
        might be skipped, namely the `.fnodetags` one. This argument is 5.8
        specific as we plan for a cleaner way to deal with this for 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            if full != b"post-clone":
                # accessing fnode cache warms the cache
                tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
                # accessing tags warms the cache
                self.tags()
                self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

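    # Illustrative sketch (not part of the original module): the callback built
    # by _buildcacheupdater() above is meant to be registered on a transaction
    # so caches are warmed once the transaction closes; the category name below
    # is hypothetical.
    #
    #     updater = repo._buildcacheupdater(tr)
    #     tr.addpostclose(b'warm-caches', updater)
    #
    # transaction.addpostclose() invokes its callback with the transaction as
    # the sole argument after a successful close, matching updater's signature.
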
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

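    # Illustrative sketch (not part of the original module): queueing work to
    # run once the outermost lock is released. The callback receives a single
    # boolean indicating whether the locked section succeeded.
    #
    #     def report(success):
    #         repo.ui.status(b'all locks released\n')
    #
    #     repo._afterlock(report)  # runs immediately when no lock is held
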
    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref() if self._wlockref else None
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

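    # Illustrative sketch (not part of the original module): callers that need
    # both locks should take them in the documented wlock -> lock order:
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # mutate both the working copy and the store
    #
    # Acquiring them in the opposite order trips the devel warning above and
    # can deadlock against a process that uses the correct order.
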
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

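    # Illustrative sketch (not part of the original module): a minimal
    # programmatic commit, assuming `repo` is an open localrepository whose
    # working directory already contains the changes to record.
    #
    #     node = repo.commit(
    #         text=b'fix frobnication',
    #         user=b'Jane Doe <jane@example.com>',
    #     )
    #     if node is None:
    #         repo.ui.status(b'nothing changed\n')
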
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure to add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

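    # Illustrative sketch (not part of the original module): per the contract
    # above, an extension hooking the status fixup point would re-register its
    # callback before each dirstate.status call:
    #
    #     def fixup(wctx, status):
    #         dirstate = wctx.repo().dirstate  # never use a cached copy
    #         ...
    #
    #     repo.addpostdsstatus(fixup)
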
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

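    # Illustrative note (not part of the original module): for each
    # (top, bottom) pair the loop above samples first-parent ancestors of
    # `top` at exponentially spaced distances 1, 2, 4, 8, ... until it reaches
    # `bottom` or the null revision. For a linear history 10 changesets deep,
    # `l` would hold the ancestors 1, 2, 4 and 8 steps below `top`. The legacy
    # wire protocol used these sparse samples to bisect the gap between two
    # known nodes without transferring every intermediate node.
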
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

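    # Illustrative sketch (not part of the original module): the prepushkey
    # hook fired above lets configuration veto a key push, e.g. in an hgrc:
    #
    #     [hooks]
    #     prepushkey.readonly = false
    #
    # A failing prepushkey hook surfaces here as HookAbort, so pushkey()
    # prints "pushkey-abort: ..." and returns False instead of writing the key.
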
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

    def register_wanted_sidedata(self, category):
        if requirementsmod.REVLOGV2_REQUIREMENT not in self.requirements:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)


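# Illustrative sketch (not part of the original module): registering a
# sidedata computer for the changelog. The computer name, category and
# return convention shown here are assumptions for illustration; the hard
# constraints are the ones checked by register_sidedata_computer() above.
#
#     def compute_mycategory(repo, store, rev, prev_sidedata):
#         sidedata = {SD_MYCATEGORY: b'...'}
#         return sidedata, (0, 0)  # sidedata, plus flags to set/unset
#
#     repo.register_sidedata_computer(
#         revlogconst.KIND_CHANGELOG,
#         b'mycategory',
#         (SD_MYCATEGORY,),
#         compute_mycategory,
#         0,
#     )
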
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


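# Illustrative note (not part of the original module): only the first
# 'journal' component of the basename is rewritten, e.g.
#
#     undoname(b'.hg/store/journal.phaseroots')
#     == b'.hg/store/undo.phaseroots'
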
def instance(ui, path, create, intents=None, createopts=None):
    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return requirements


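# Illustrative sketch (not part of the original module): an extension can add
# its own requirement to freshly created repositories by wrapping the
# function above; 'exp-myextension' is a hypothetical requirement name.
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, ui, createopts):
#         requirements = orig(ui, createopts)
#         requirements.add(b'exp-myextension')
#         return requirements
#
#     def uisetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'newreporequirements', _newreporequirements
#         )
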
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
            )
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


3601 def filterknowncreateopts(ui, createopts):
3603 def filterknowncreateopts(ui, createopts):
3602 """Filters a dict of repo creation options against options that are known.
3604 """Filters a dict of repo creation options against options that are known.
3603
3605
3604 Receives a dict of repo creation options and returns a dict of those
3606 Receives a dict of repo creation options and returns a dict of those
3605 options that we don't know how to handle.
3607 options that we don't know how to handle.
3606
3608
3607 This function is called as part of repository creation. If the
3609 This function is called as part of repository creation. If the
3608 returned dict contains any items, repository creation will not
3610 returned dict contains any items, repository creation will not
3609 be allowed, as it means there was a request to create a repository
3611 be allowed, as it means there was a request to create a repository
3610 with options not recognized by loaded code.
3612 with options not recognized by loaded code.
3611
3613
3612 Extensions can wrap this function to filter out creation options
3614 Extensions can wrap this function to filter out creation options
3613 they know how to handle.
3615 they know how to handle.
3614 """
3616 """
3615 known = {
3617 known = {
3616 b'backend',
3618 b'backend',
3617 b'lfs',
3619 b'lfs',
3618 b'narrowfiles',
3620 b'narrowfiles',
3619 b'sharedrepo',
3621 b'sharedrepo',
3620 b'sharedrelative',
3622 b'sharedrelative',
3621 b'shareditems',
3623 b'shareditems',
3622 b'shallowfilestore',
3624 b'shallowfilestore',
3623 }
3625 }
3624
3626
3625 return {k: v for k, v in createopts.items() if k not in known}
3627 return {k: v for k, v in createopts.items() if k not in known}
3626
3628
3627
3629
3628 def createrepository(ui, path, createopts=None):
3630 def createrepository(ui, path, createopts=None):
3629 """Create a new repository in a vfs.
3631 """Create a new repository in a vfs.
3630
3632
3631 ``path`` path to the new repo's working directory.
3633 ``path`` path to the new repo's working directory.
3632 ``createopts`` options for the new repository.
3634 ``createopts`` options for the new repository.
3633
3635
3634 The following keys for ``createopts`` are recognized:
3636 The following keys for ``createopts`` are recognized:
3635
3637
3636 backend
3638 backend
3637 The storage backend to use.
3639 The storage backend to use.
3638 lfs
3640 lfs
3639 Repository will be created with ``lfs`` requirement. The lfs extension
3641 Repository will be created with ``lfs`` requirement. The lfs extension
3640 will automatically be loaded when the repository is accessed.
3642 will automatically be loaded when the repository is accessed.
3641 narrowfiles
3643 narrowfiles
3642 Set up repository to support narrow file storage.
3644 Set up repository to support narrow file storage.
3643 sharedrepo
3645 sharedrepo
3644 Repository object from which storage should be shared.
3646 Repository object from which storage should be shared.
3645 sharedrelative
3647 sharedrelative
3646 Boolean indicating if the path to the shared repo should be
3648 Boolean indicating if the path to the shared repo should be
3647 stored as relative. By default, the pointer to the "parent" repo
3649 stored as relative. By default, the pointer to the "parent" repo
3648 is stored as an absolute path.
3650 is stored as an absolute path.
3649 shareditems
3651 shareditems
3650 Set of items to share to the new repository (in addition to storage).
3652 Set of items to share to the new repository (in addition to storage).
3651 shallowfilestore
3653 shallowfilestore
3652 Indicates that storage for files should be shallow (not all ancestor
3654 Indicates that storage for files should be shallow (not all ancestor
3653 revisions are known).
3655 revisions are known).
3654 """
3656 """
3655 createopts = defaultcreateopts(ui, createopts=createopts)
3657 createopts = defaultcreateopts(ui, createopts=createopts)
3656
3658
3657 unknownopts = filterknowncreateopts(ui, createopts)
3659 unknownopts = filterknowncreateopts(ui, createopts)
3658
3660
3659 if not isinstance(unknownopts, dict):
3661 if not isinstance(unknownopts, dict):
3660 raise error.ProgrammingError(
3662 raise error.ProgrammingError(
3661 b'filterknowncreateopts() did not return a dict'
3663 b'filterknowncreateopts() did not return a dict'
3662 )
3664 )
3663
3665
3664 if unknownopts:
3666 if unknownopts:
3665 raise error.Abort(
3667 raise error.Abort(
3666 _(
3668 _(
3667 b'unable to create repository because of unknown '
3669 b'unable to create repository because of unknown '
3668 b'creation option: %s'
3670 b'creation option: %s'
3669 )
3671 )
3670 % b', '.join(sorted(unknownopts)),
3672 % b', '.join(sorted(unknownopts)),
3671 hint=_(b'is a required extension not loaded?'),
3673 hint=_(b'is a required extension not loaded?'),
3672 )
3674 )
3673
3675
3674 requirements = newreporequirements(ui, createopts=createopts)
3676 requirements = newreporequirements(ui, createopts=createopts)
3675 requirements -= checkrequirementscompat(ui, requirements)
3677 requirements -= checkrequirementscompat(ui, requirements)
3676
3678
3677 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3679 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3678
3680
3679 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3681 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3680 if hgvfs.exists():
3682 if hgvfs.exists():
3681 raise error.RepoError(_(b'repository %s already exists') % path)
3683 raise error.RepoError(_(b'repository %s already exists') % path)
3682
3684
3683 if b'sharedrepo' in createopts:
3685 if b'sharedrepo' in createopts:
3684 sharedpath = createopts[b'sharedrepo'].sharedpath
3686 sharedpath = createopts[b'sharedrepo'].sharedpath
3685
3687
3686 if createopts.get(b'sharedrelative'):
3688 if createopts.get(b'sharedrelative'):
3687 try:
3689 try:
3688 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3690 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3689 sharedpath = util.pconvert(sharedpath)
3691 sharedpath = util.pconvert(sharedpath)
3690 except (IOError, ValueError) as e:
3692 except (IOError, ValueError) as e:
3691 # ValueError is raised on Windows if the drive letters differ
3693 # ValueError is raised on Windows if the drive letters differ
3692 # on each path.
3694 # on each path.
3693 raise error.Abort(
3695 raise error.Abort(
3694 _(b'cannot calculate relative path'),
3696 _(b'cannot calculate relative path'),
3695 hint=stringutil.forcebytestr(e),
3697 hint=stringutil.forcebytestr(e),
3696 )
3698 )
3697
3699
3698 if not wdirvfs.exists():
3700 if not wdirvfs.exists():
3699 wdirvfs.makedirs()
3701 wdirvfs.makedirs()
3700
3702
3701 hgvfs.makedir(notindexed=True)
3703 hgvfs.makedir(notindexed=True)
3702 if b'sharedrepo' not in createopts:
3704 if b'sharedrepo' not in createopts:
3703 hgvfs.mkdir(b'cache')
3705 hgvfs.mkdir(b'cache')
3704 hgvfs.mkdir(b'wcache')
3706 hgvfs.mkdir(b'wcache')
3705
3707
3706 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3708 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3707 if has_store and b'sharedrepo' not in createopts:
3709 if has_store and b'sharedrepo' not in createopts:
3708 hgvfs.mkdir(b'store')
3710 hgvfs.mkdir(b'store')
3709
3711
3710 # We create an invalid changelog outside the store so very old
3712 # We create an invalid changelog outside the store so very old
3711 # Mercurial versions (which didn't know about the requirements
3713 # Mercurial versions (which didn't know about the requirements
3712 # file) encounter an error on reading the changelog. This
3714 # file) encounter an error on reading the changelog. This
3713 # effectively locks out old clients and prevents them from
3715 # effectively locks out old clients and prevents them from
3714 # mucking with a repo in an unknown format.
3716 # mucking with a repo in an unknown format.
3715 #
3717 #
3716 # The revlog header has version 65535, which won't be recognized by
3718 # The revlog header has version 65535, which won't be recognized by
3717 # such old clients.
3719 # such old clients.
3718 hgvfs.append(
3720 hgvfs.append(
3719 b'00changelog.i',
3721 b'00changelog.i',
3720 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3722 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3721 b'layout',
3723 b'layout',
3722 )
3724 )
3723
3725
3724 # Filter the requirements into working copy and store ones
3726 # Filter the requirements into working copy and store ones
3725 wcreq, storereq = scmutil.filterrequirements(requirements)
3727 wcreq, storereq = scmutil.filterrequirements(requirements)
3726 # write working copy ones
3728 # write working copy ones
3727 scmutil.writerequires(hgvfs, wcreq)
3729 scmutil.writerequires(hgvfs, wcreq)
3728 # If there are store requirements and the current repository
3730 # If there are store requirements and the current repository
3729 # is not a shared one, write the store requirements
3731 # is not a shared one, write the store requirements
3730 # For a new shared repository, we don't need to write the store
3732 # For a new shared repository, we don't need to write the store
3731 # requirements, as they are already present in the store's requires
3733 # requirements, as they are already present in the store's requires
3732 if storereq and b'sharedrepo' not in createopts:
3734 if storereq and b'sharedrepo' not in createopts:
3733 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3735 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3734 scmutil.writerequires(storevfs, storereq)
3736 scmutil.writerequires(storevfs, storereq)
3735
3737
3736 # Write out a file telling readers where to find the shared store.
3738 # Write out a file telling readers where to find the shared store.
3737 if b'sharedrepo' in createopts:
3739 if b'sharedrepo' in createopts:
3738 hgvfs.write(b'sharedpath', sharedpath)
3740 hgvfs.write(b'sharedpath', sharedpath)
3739
3741
3740 if createopts.get(b'shareditems'):
3742 if createopts.get(b'shareditems'):
3741 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3743 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3742 hgvfs.write(b'shared', shared)
3744 hgvfs.write(b'shared', shared)
3743
3745
3744
3746
3745 def poisonrepository(repo):
3747 def poisonrepository(repo):
3746 """Poison a repository instance so it can no longer be used."""
3748 """Poison a repository instance so it can no longer be used."""
3747 # Perform any cleanup on the instance.
3749 # Perform any cleanup on the instance.
3748 repo.close()
3750 repo.close()
3749
3751
3750 # Our strategy is to replace the type of the object with one that
3752 # Our strategy is to replace the type of the object with one that
3751 # makes all attribute lookups result in an error.
3753 # makes all attribute lookups result in an error.
3752 #
3754 #
3753 # But we have to allow the close() method because some constructors
3755 # But we have to allow the close() method because some constructors
3754 # of repos call close() on repo references.
3756 # of repos call close() on repo references.
3755 class poisonedrepository(object):
3757 class poisonedrepository(object):
3756 def __getattribute__(self, item):
3758 def __getattribute__(self, item):
3757 if item == 'close':
3759 if item == 'close':
3758 return object.__getattribute__(self, item)
3760 return object.__getattribute__(self, item)
3759
3761
3760 raise error.ProgrammingError(
3762 raise error.ProgrammingError(
3761 b'repo instances should not be used after unshare'
3763 b'repo instances should not be used after unshare'
3762 )
3764 )
3763
3765
3764 def close(self):
3766 def close(self):
3765 pass
3767 pass
3766
3768
3767 # We may have a repoview, which intercepts __setattr__. So be sure
3769 # We may have a repoview, which intercepts __setattr__. So be sure
3768 # we operate at the lowest level possible.
3770 # we operate at the lowest level possible.
3769 object.__setattr__(repo, '__class__', poisonedrepository)
3771 object.__setattr__(repo, '__class__', poisonedrepository)
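The class-swap trick used by poisonrepository works because CPython allows assigning to __class__ when the object layouts are compatible, and object.__setattr__ bypasses any __setattr__ override such as repoview's. A minimal standalone sketch of the technique, with hypothetical class names:

    class _Poisoned(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)
            raise RuntimeError('instance used after being poisoned')

        def close(self):
            pass

    class Victim(object):
        def close(self):
            pass

    v = Victim()
    object.__setattr__(v, '__class__', _Poisoned)
    v.close()       # still permitted
    # v.anything    # any other attribute access now raises RuntimeError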
@@ -1,3214 +1,3249 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import binascii
16 import binascii
17 import collections
17 import collections
18 import contextlib
18 import contextlib
19 import errno
19 import errno
20 import io
20 import io
21 import os
21 import os
22 import struct
22 import struct
23 import zlib
23 import zlib
24
24
25 # import stuff from node for others to import from revlog
25 # import stuff from node for others to import from revlog
26 from .node import (
26 from .node import (
27 bin,
27 bin,
28 hex,
28 hex,
29 nullrev,
29 nullrev,
30 sha1nodeconstants,
30 sha1nodeconstants,
31 short,
31 short,
32 wdirrev,
32 wdirrev,
33 )
33 )
34 from .i18n import _
34 from .i18n import _
35 from .pycompat import getattr
35 from .pycompat import getattr
36 from .revlogutils.constants import (
36 from .revlogutils.constants import (
37 ALL_KINDS,
37 ALL_KINDS,
38 FEATURES_BY_VERSION,
38 FEATURES_BY_VERSION,
39 FLAG_GENERALDELTA,
39 FLAG_GENERALDELTA,
40 FLAG_INLINE_DATA,
40 FLAG_INLINE_DATA,
41 INDEX_HEADER,
41 INDEX_HEADER,
42 REVLOGV0,
42 REVLOGV0,
43 REVLOGV1,
43 REVLOGV1,
44 REVLOGV1_FLAGS,
44 REVLOGV1_FLAGS,
45 REVLOGV2,
45 REVLOGV2,
46 REVLOGV2_FLAGS,
46 REVLOGV2_FLAGS,
47 REVLOG_DEFAULT_FLAGS,
47 REVLOG_DEFAULT_FLAGS,
48 REVLOG_DEFAULT_FORMAT,
48 REVLOG_DEFAULT_FORMAT,
49 REVLOG_DEFAULT_VERSION,
49 REVLOG_DEFAULT_VERSION,
50 SUPPORTED_FLAGS,
50 SUPPORTED_FLAGS,
51 )
51 )
52 from .revlogutils.flagutil import (
52 from .revlogutils.flagutil import (
53 REVIDX_DEFAULT_FLAGS,
53 REVIDX_DEFAULT_FLAGS,
54 REVIDX_ELLIPSIS,
54 REVIDX_ELLIPSIS,
55 REVIDX_EXTSTORED,
55 REVIDX_EXTSTORED,
56 REVIDX_FLAGS_ORDER,
56 REVIDX_FLAGS_ORDER,
57 REVIDX_HASCOPIESINFO,
57 REVIDX_HASCOPIESINFO,
58 REVIDX_ISCENSORED,
58 REVIDX_ISCENSORED,
59 REVIDX_RAWTEXT_CHANGING_FLAGS,
59 REVIDX_RAWTEXT_CHANGING_FLAGS,
60 )
60 )
61 from .thirdparty import attr
61 from .thirdparty import attr
62 from . import (
62 from . import (
63 ancestor,
63 ancestor,
64 dagop,
64 dagop,
65 error,
65 error,
66 mdiff,
66 mdiff,
67 policy,
67 policy,
68 pycompat,
68 pycompat,
69 templatefilters,
69 templatefilters,
70 util,
70 util,
71 )
71 )
72 from .interfaces import (
72 from .interfaces import (
73 repository,
73 repository,
74 util as interfaceutil,
74 util as interfaceutil,
75 )
75 )
76 from .revlogutils import (
76 from .revlogutils import (
77 deltas as deltautil,
77 deltas as deltautil,
78 docket as docketutil,
78 docket as docketutil,
79 flagutil,
79 flagutil,
80 nodemap as nodemaputil,
80 nodemap as nodemaputil,
81 revlogv0,
81 revlogv0,
82 sidedata as sidedatautil,
82 sidedata as sidedatautil,
83 )
83 )
84 from .utils import (
84 from .utils import (
85 storageutil,
85 storageutil,
86 stringutil,
86 stringutil,
87 )
87 )
88
88
89 # bare usage of all the names to prevent pyflakes unused-name complaints
89 # bare usage of all the names to prevent pyflakes unused-name complaints
90 # We need these names available in the module for extensions.
90 # We need these names available in the module for extensions.
91
91
92 REVLOGV0
92 REVLOGV0
93 REVLOGV1
93 REVLOGV1
94 REVLOGV2
94 REVLOGV2
95 FLAG_INLINE_DATA
95 FLAG_INLINE_DATA
96 FLAG_GENERALDELTA
96 FLAG_GENERALDELTA
97 REVLOG_DEFAULT_FLAGS
97 REVLOG_DEFAULT_FLAGS
98 REVLOG_DEFAULT_FORMAT
98 REVLOG_DEFAULT_FORMAT
99 REVLOG_DEFAULT_VERSION
99 REVLOG_DEFAULT_VERSION
100 REVLOGV1_FLAGS
100 REVLOGV1_FLAGS
101 REVLOGV2_FLAGS
101 REVLOGV2_FLAGS
102 REVIDX_ISCENSORED
102 REVIDX_ISCENSORED
103 REVIDX_ELLIPSIS
103 REVIDX_ELLIPSIS
104 REVIDX_HASCOPIESINFO
104 REVIDX_HASCOPIESINFO
105 REVIDX_EXTSTORED
105 REVIDX_EXTSTORED
106 REVIDX_DEFAULT_FLAGS
106 REVIDX_DEFAULT_FLAGS
107 REVIDX_FLAGS_ORDER
107 REVIDX_FLAGS_ORDER
108 REVIDX_RAWTEXT_CHANGING_FLAGS
108 REVIDX_RAWTEXT_CHANGING_FLAGS
109
109
110 parsers = policy.importmod('parsers')
110 parsers = policy.importmod('parsers')
111 rustancestor = policy.importrust('ancestor')
111 rustancestor = policy.importrust('ancestor')
112 rustdagop = policy.importrust('dagop')
112 rustdagop = policy.importrust('dagop')
113 rustrevlog = policy.importrust('revlog')
113 rustrevlog = policy.importrust('revlog')
114
114
115 # Aliased for performance.
115 # Aliased for performance.
116 _zlibdecompress = zlib.decompress
116 _zlibdecompress = zlib.decompress
117
117
118 # max size of revlog with inline data
118 # max size of revlog with inline data
119 _maxinline = 131072
119 _maxinline = 131072
120 _chunksize = 1048576
120 _chunksize = 1048576
121
121
122 # Flag processors for REVIDX_ELLIPSIS.
122 # Flag processors for REVIDX_ELLIPSIS.
123 def ellipsisreadprocessor(rl, text):
123 def ellipsisreadprocessor(rl, text):
124 return text, False
124 return text, False
125
125
126
126
127 def ellipsiswriteprocessor(rl, text):
127 def ellipsiswriteprocessor(rl, text):
128 return text, False
128 return text, False
129
129
130
130
131 def ellipsisrawprocessor(rl, text):
131 def ellipsisrawprocessor(rl, text):
132 return False
132 return False
133
133
134
134
135 ellipsisprocessor = (
135 ellipsisprocessor = (
136 ellipsisreadprocessor,
136 ellipsisreadprocessor,
137 ellipsiswriteprocessor,
137 ellipsiswriteprocessor,
138 ellipsisrawprocessor,
138 ellipsisrawprocessor,
139 )
139 )
140
140
141
141
142 def offset_type(offset, type):
142 def offset_type(offset, type):
143 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
143 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
144 raise ValueError(b'unknown revlog index flags')
144 raise ValueError(b'unknown revlog index flags')
145 return int(int(offset) << 16 | type)
145 return int(int(offset) << 16 | type)
146
146
147
147
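offset_type packs a data-file offset and the per-revision flags into one integer: the low 16 bits hold the flags, everything above holds the offset. A minimal standalone sketch of the packing (assuming no unknown flag bits, so the guard above is omitted; the helper name is hypothetical):

    def pack_offset_flags(offset, flags):
        # low 16 bits: revision flags; remaining bits: data offset
        return (offset << 16) | flags

    packed = pack_offset_flags(1024, 0)
    assert packed >> 16 == 1024     # recover the offset
    assert packed & 0xFFFF == 0     # recover the flags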
148 def _verify_revision(rl, skipflags, state, node):
148 def _verify_revision(rl, skipflags, state, node):
149 """Verify the integrity of the given revlog ``node`` while providing a hook
149 """Verify the integrity of the given revlog ``node`` while providing a hook
150 point for extensions to influence the operation."""
150 point for extensions to influence the operation."""
151 if skipflags:
151 if skipflags:
152 state[b'skipread'].add(node)
152 state[b'skipread'].add(node)
153 else:
153 else:
154 # Side-effect: read content and verify hash.
154 # Side-effect: read content and verify hash.
155 rl.revision(node)
155 rl.revision(node)
156
156
157
157
158 # True if a fast implementation for persistent-nodemap is available
158 # True if a fast implementation for persistent-nodemap is available
159 #
159 #
160 # We also consider the "pure" python implementation "fast" because
160 # We also consider the "pure" python implementation "fast" because
161 # people using pure don't really have performance considerations (and a
161 # people using pure don't really have performance considerations (and a
162 # wheelbarrow of other slowness sources)
162 # wheelbarrow of other slowness sources)
163 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
163 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
164 parsers, 'BaseIndexObject'
164 parsers, 'BaseIndexObject'
165 )
165 )
166
166
167
167
168 @attr.s(slots=True, frozen=True)
168 @attr.s(slots=True, frozen=True)
169 class _revisioninfo(object):
169 class _revisioninfo(object):
170 """Information about a revision that allows building its fulltext
170 """Information about a revision that allows building its fulltext
171 node: expected hash of the revision
171 node: expected hash of the revision
172 p1, p2: parent revs of the revision
172 p1, p2: parent revs of the revision
173 btext: built text cache consisting of a one-element list
173 btext: built text cache consisting of a one-element list
174 cachedelta: (baserev, uncompressed_delta) or None
174 cachedelta: (baserev, uncompressed_delta) or None
175 flags: flags associated to the revision storage
175 flags: flags associated to the revision storage
176
176
177 One of btext[0] or cachedelta must be set.
177 One of btext[0] or cachedelta must be set.
178 """
178 """
179
179
180 node = attr.ib()
180 node = attr.ib()
181 p1 = attr.ib()
181 p1 = attr.ib()
182 p2 = attr.ib()
182 p2 = attr.ib()
183 btext = attr.ib()
183 btext = attr.ib()
184 textlen = attr.ib()
184 textlen = attr.ib()
185 cachedelta = attr.ib()
185 cachedelta = attr.ib()
186 flags = attr.ib()
186 flags = attr.ib()
187
187
188
188
189 @interfaceutil.implementer(repository.irevisiondelta)
189 @interfaceutil.implementer(repository.irevisiondelta)
190 @attr.s(slots=True)
190 @attr.s(slots=True)
191 class revlogrevisiondelta(object):
191 class revlogrevisiondelta(object):
192 node = attr.ib()
192 node = attr.ib()
193 p1node = attr.ib()
193 p1node = attr.ib()
194 p2node = attr.ib()
194 p2node = attr.ib()
195 basenode = attr.ib()
195 basenode = attr.ib()
196 flags = attr.ib()
196 flags = attr.ib()
197 baserevisionsize = attr.ib()
197 baserevisionsize = attr.ib()
198 revision = attr.ib()
198 revision = attr.ib()
199 delta = attr.ib()
199 delta = attr.ib()
200 sidedata = attr.ib()
200 sidedata = attr.ib()
201 protocol_flags = attr.ib()
201 protocol_flags = attr.ib()
202 linknode = attr.ib(default=None)
202 linknode = attr.ib(default=None)
203
203
204
204
205 @interfaceutil.implementer(repository.iverifyproblem)
205 @interfaceutil.implementer(repository.iverifyproblem)
206 @attr.s(frozen=True)
206 @attr.s(frozen=True)
207 class revlogproblem(object):
207 class revlogproblem(object):
208 warning = attr.ib(default=None)
208 warning = attr.ib(default=None)
209 error = attr.ib(default=None)
209 error = attr.ib(default=None)
210 node = attr.ib(default=None)
210 node = attr.ib(default=None)
211
211
212
212
213 def parse_index_v1(data, inline):
213 def parse_index_v1(data, inline):
214 # call the C implementation to parse the index data
214 # call the C implementation to parse the index data
215 index, cache = parsers.parse_index2(data, inline)
215 index, cache = parsers.parse_index2(data, inline)
216 return index, cache
216 return index, cache
217
217
218
218
219 def parse_index_v2(data, inline):
219 def parse_index_v2(data, inline):
220 # call the C implementation to parse the index data
220 # call the C implementation to parse the index data
221 index, cache = parsers.parse_index2(data, inline, revlogv2=True)
221 index, cache = parsers.parse_index2(data, inline, revlogv2=True)
222 return index, cache
222 return index, cache
223
223
224
224
225 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
225 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
226
226
227 def parse_index_v1_nodemap(data, inline):
227 def parse_index_v1_nodemap(data, inline):
228 index, cache = parsers.parse_index_devel_nodemap(data, inline)
228 index, cache = parsers.parse_index_devel_nodemap(data, inline)
229 return index, cache
229 return index, cache
230
230
231
231
232 else:
232 else:
233 parse_index_v1_nodemap = None
233 parse_index_v1_nodemap = None
234
234
235
235
236 def parse_index_v1_mixed(data, inline):
236 def parse_index_v1_mixed(data, inline):
237 index, cache = parse_index_v1(data, inline)
237 index, cache = parse_index_v1(data, inline)
238 return rustrevlog.MixedIndex(index), cache
238 return rustrevlog.MixedIndex(index), cache
239
239
240
240
241 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
241 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
242 # signed integer)
242 # signed integer)
243 _maxentrysize = 0x7FFFFFFF
243 _maxentrysize = 0x7FFFFFFF
244
244
245
245
246 class revlog(object):
246 class revlog(object):
247 """
247 """
248 the underlying revision storage object
248 the underlying revision storage object
249
249
250 A revlog consists of two parts, an index and the revision data.
250 A revlog consists of two parts, an index and the revision data.
251
251
252 The index is a file with a fixed record size containing
252 The index is a file with a fixed record size containing
253 information on each revision, including its nodeid (hash), the
253 information on each revision, including its nodeid (hash), the
254 nodeids of its parents, the position and offset of its data within
254 nodeids of its parents, the position and offset of its data within
255 the data file, and the revision it's based on. Finally, each entry
255 the data file, and the revision it's based on. Finally, each entry
256 contains a linkrev entry that can serve as a pointer to external
256 contains a linkrev entry that can serve as a pointer to external
257 data.
257 data.
258
258
259 The revision data itself is a linear collection of data chunks.
259 The revision data itself is a linear collection of data chunks.
260 Each chunk represents a revision and is usually represented as a
260 Each chunk represents a revision and is usually represented as a
261 delta against the previous chunk. To bound lookup time, runs of
261 delta against the previous chunk. To bound lookup time, runs of
262 deltas are limited to about 2 times the length of the original
262 deltas are limited to about 2 times the length of the original
263 version data. This makes retrieval of a version proportional to
263 version data. This makes retrieval of a version proportional to
264 its size, or O(1) relative to the number of revisions.
264 its size, or O(1) relative to the number of revisions.
265
265
266 Both pieces of the revlog are written to in an append-only
266 Both pieces of the revlog are written to in an append-only
267 fashion, which means we never need to rewrite a file to insert or
267 fashion, which means we never need to rewrite a file to insert or
268 remove data, and can use some simple techniques to avoid the need
268 remove data, and can use some simple techniques to avoid the need
269 for locking while reading.
269 for locking while reading.
270
270
271 If checkambig, indexfile is opened with checkambig=True at
271 If checkambig, indexfile is opened with checkambig=True at
272 writing, to avoid file stat ambiguity.
272 writing, to avoid file stat ambiguity.
273
273
274 If mmaplargeindex is True, and an mmapindexthreshold is set, the
274 If mmaplargeindex is True, and an mmapindexthreshold is set, the
275 index will be mmapped rather than read if it is larger than the
275 index will be mmapped rather than read if it is larger than the
276 configured threshold.
276 configured threshold.
277
277
278 If censorable is True, the revlog can have censored revisions.
278 If censorable is True, the revlog can have censored revisions.
279
279
280 If `upperboundcomp` is not None, this is the expected maximal gain from
280 If `upperboundcomp` is not None, this is the expected maximal gain from
281 compression for the data content.
281 compression for the data content.
282
282
283 `concurrencychecker` is an optional function that receives 3 arguments: a
283 `concurrencychecker` is an optional function that receives 3 arguments: a
284 file handle, a filename, and an expected position. It should check whether
284 file handle, a filename, and an expected position. It should check whether
285 the current position in the file handle is valid, and log/warn/fail (by
285 the current position in the file handle is valid, and log/warn/fail (by
286 raising).
286 raising).
287 """
287 """
288
288
289 _flagserrorclass = error.RevlogError
289 _flagserrorclass = error.RevlogError
290
290
291 def __init__(
291 def __init__(
292 self,
292 self,
293 opener,
293 opener,
294 target,
294 target,
295 radix,
295 radix,
296 postfix=None,
296 postfix=None,
297 checkambig=False,
297 checkambig=False,
298 mmaplargeindex=False,
298 mmaplargeindex=False,
299 censorable=False,
299 censorable=False,
300 upperboundcomp=None,
300 upperboundcomp=None,
301 persistentnodemap=False,
301 persistentnodemap=False,
302 concurrencychecker=None,
302 concurrencychecker=None,
303 ):
303 ):
304 """
304 """
305 create a revlog object
305 create a revlog object
306
306
307 opener is a function that abstracts the file opening operation
307 opener is a function that abstracts the file opening operation
308 and can be used to implement COW semantics or the like.
308 and can be used to implement COW semantics or the like.
309
309
310 `target`: a (KIND, ID) tuple that identifies the content stored in
310 `target`: a (KIND, ID) tuple that identifies the content stored in
311 this revlog. It helps the rest of the code understand what the revlog
311 this revlog. It helps the rest of the code understand what the revlog
312 is about without having to resort to heuristics and index filename
312 is about without having to resort to heuristics and index filename
313 analysis. Note that this must reliably be set by normal code, but
313 analysis. Note that this must reliably be set by normal code, but
314 test, debug, or performance measurement code might not set it to an
314 test, debug, or performance measurement code might not set it to an
315 accurate value.
315 accurate value.
316 """
316 """
317 self.upperboundcomp = upperboundcomp
317 self.upperboundcomp = upperboundcomp
318
318
319 self.radix = radix
319 self.radix = radix
320
320
321 self._docket_file = None
321 self._docket_file = None
322 self._indexfile = None
322 self._indexfile = None
323 self._datafile = None
323 self._datafile = None
324 self._nodemap_file = None
324 self._nodemap_file = None
325 self.postfix = postfix
325 self.postfix = postfix
326 self.opener = opener
326 self.opener = opener
327 if persistentnodemap:
327 if persistentnodemap:
328 self._nodemap_file = nodemaputil.get_nodemap_file(self)
328 self._nodemap_file = nodemaputil.get_nodemap_file(self)
329
329
330 assert target[0] in ALL_KINDS
330 assert target[0] in ALL_KINDS
331 assert len(target) == 2
331 assert len(target) == 2
332 self.target = target
332 self.target = target
333 # When True, indexfile is opened with checkambig=True at writing, to
333 # When True, indexfile is opened with checkambig=True at writing, to
334 # avoid file stat ambiguity.
334 # avoid file stat ambiguity.
335 self._checkambig = checkambig
335 self._checkambig = checkambig
336 self._mmaplargeindex = mmaplargeindex
336 self._mmaplargeindex = mmaplargeindex
337 self._censorable = censorable
337 self._censorable = censorable
338 # 3-tuple of (node, rev, text) for a raw revision.
338 # 3-tuple of (node, rev, text) for a raw revision.
339 self._revisioncache = None
339 self._revisioncache = None
340 # Maps rev to chain base rev.
340 # Maps rev to chain base rev.
341 self._chainbasecache = util.lrucachedict(100)
341 self._chainbasecache = util.lrucachedict(100)
342 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
342 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
343 self._chunkcache = (0, b'')
343 self._chunkcache = (0, b'')
344 # How much data to read and cache into the raw revlog data cache.
344 # How much data to read and cache into the raw revlog data cache.
345 self._chunkcachesize = 65536
345 self._chunkcachesize = 65536
346 self._maxchainlen = None
346 self._maxchainlen = None
347 self._deltabothparents = True
347 self._deltabothparents = True
348 self.index = None
348 self.index = None
349 self._docket = None
349 self._docket = None
350 self._nodemap_docket = None
350 self._nodemap_docket = None
351 # Mapping of partial identifiers to full nodes.
351 # Mapping of partial identifiers to full nodes.
352 self._pcache = {}
352 self._pcache = {}
353 # Mapping of revision integer to full node.
353 # Mapping of revision integer to full node.
354 self._compengine = b'zlib'
354 self._compengine = b'zlib'
355 self._compengineopts = {}
355 self._compengineopts = {}
356 self._maxdeltachainspan = -1
356 self._maxdeltachainspan = -1
357 self._withsparseread = False
357 self._withsparseread = False
358 self._sparserevlog = False
358 self._sparserevlog = False
359 self.hassidedata = False
359 self.hassidedata = False
360 self._srdensitythreshold = 0.50
360 self._srdensitythreshold = 0.50
361 self._srmingapsize = 262144
361 self._srmingapsize = 262144
362
362
363 # Make copy of flag processors so each revlog instance can support
363 # Make copy of flag processors so each revlog instance can support
364 # custom flags.
364 # custom flags.
365 self._flagprocessors = dict(flagutil.flagprocessors)
365 self._flagprocessors = dict(flagutil.flagprocessors)
366
366
367 # 2-tuple of file handles being used for active writing.
367 # 2-tuple of file handles being used for active writing.
368 self._writinghandles = None
368 self._writinghandles = None
369 # prevent nesting of addgroup
369 # prevent nesting of addgroup
370 self._adding_group = None
370 self._adding_group = None
371
371
372 self._loadindex()
372 self._loadindex()
373
373
374 self._concurrencychecker = concurrencychecker
374 self._concurrencychecker = concurrencychecker
375
375
376 def _init_opts(self):
376 def _init_opts(self):
377 """process options (from above/config) to setup associated default revlog mode
377 """process options (from above/config) to setup associated default revlog mode
378
378
379 These values might be affected when actually reading on disk information.
379 These values might be affected when actually reading on disk information.
380
380
381 The relevant values are returned for use in _loadindex().
381 The relevant values are returned for use in _loadindex().
382
382
383 * newversionflags:
383 * newversionflags:
384 version header to use if we need to create a new revlog
384 version header to use if we need to create a new revlog
385
385
386 * mmapindexthreshold:
386 * mmapindexthreshold:
387 minimal index size at which to start using mmap
387 minimal index size at which to start using mmap
388
388
389 * force_nodemap:
389 * force_nodemap:
390 force the usage of a "development" version of the nodemap code
390 force the usage of a "development" version of the nodemap code
391 """
391 """
392 mmapindexthreshold = None
392 mmapindexthreshold = None
393 opts = self.opener.options
393 opts = self.opener.options
394
394
395 if b'revlogv2' in opts:
395 if b'revlogv2' in opts:
396 new_header = REVLOGV2 | FLAG_INLINE_DATA
396 new_header = REVLOGV2 | FLAG_INLINE_DATA
397 elif b'revlogv1' in opts:
397 elif b'revlogv1' in opts:
398 new_header = REVLOGV1 | FLAG_INLINE_DATA
398 new_header = REVLOGV1 | FLAG_INLINE_DATA
399 if b'generaldelta' in opts:
399 if b'generaldelta' in opts:
400 new_header |= FLAG_GENERALDELTA
400 new_header |= FLAG_GENERALDELTA
401 elif b'revlogv0' in self.opener.options:
401 elif b'revlogv0' in self.opener.options:
402 new_header = REVLOGV0
402 new_header = REVLOGV0
403 else:
403 else:
404 new_header = REVLOG_DEFAULT_VERSION
404 new_header = REVLOG_DEFAULT_VERSION
405
405
406 if b'chunkcachesize' in opts:
406 if b'chunkcachesize' in opts:
407 self._chunkcachesize = opts[b'chunkcachesize']
407 self._chunkcachesize = opts[b'chunkcachesize']
408 if b'maxchainlen' in opts:
408 if b'maxchainlen' in opts:
409 self._maxchainlen = opts[b'maxchainlen']
409 self._maxchainlen = opts[b'maxchainlen']
410 if b'deltabothparents' in opts:
410 if b'deltabothparents' in opts:
411 self._deltabothparents = opts[b'deltabothparents']
411 self._deltabothparents = opts[b'deltabothparents']
412 self._lazydelta = bool(opts.get(b'lazydelta', True))
412 self._lazydelta = bool(opts.get(b'lazydelta', True))
413 self._lazydeltabase = False
413 self._lazydeltabase = False
414 if self._lazydelta:
414 if self._lazydelta:
415 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
415 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
416 if b'compengine' in opts:
416 if b'compengine' in opts:
417 self._compengine = opts[b'compengine']
417 self._compengine = opts[b'compengine']
418 if b'zlib.level' in opts:
418 if b'zlib.level' in opts:
419 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
419 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
420 if b'zstd.level' in opts:
420 if b'zstd.level' in opts:
421 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
421 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
422 if b'maxdeltachainspan' in opts:
422 if b'maxdeltachainspan' in opts:
423 self._maxdeltachainspan = opts[b'maxdeltachainspan']
423 self._maxdeltachainspan = opts[b'maxdeltachainspan']
424 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
424 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
425 mmapindexthreshold = opts[b'mmapindexthreshold']
425 mmapindexthreshold = opts[b'mmapindexthreshold']
426 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
426 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
427 withsparseread = bool(opts.get(b'with-sparse-read', False))
427 withsparseread = bool(opts.get(b'with-sparse-read', False))
428 # sparse-revlog forces sparse-read
428 # sparse-revlog forces sparse-read
429 self._withsparseread = self._sparserevlog or withsparseread
429 self._withsparseread = self._sparserevlog or withsparseread
430 if b'sparse-read-density-threshold' in opts:
430 if b'sparse-read-density-threshold' in opts:
431 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
431 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
432 if b'sparse-read-min-gap-size' in opts:
432 if b'sparse-read-min-gap-size' in opts:
433 self._srmingapsize = opts[b'sparse-read-min-gap-size']
433 self._srmingapsize = opts[b'sparse-read-min-gap-size']
434 if opts.get(b'enableellipsis'):
434 if opts.get(b'enableellipsis'):
435 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
435 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
436
436
437 # revlog v0 doesn't have flag processors
437 # revlog v0 doesn't have flag processors
438 for flag, processor in pycompat.iteritems(
438 for flag, processor in pycompat.iteritems(
439 opts.get(b'flagprocessors', {})
439 opts.get(b'flagprocessors', {})
440 ):
440 ):
441 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
441 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
442
442
443 if self._chunkcachesize <= 0:
443 if self._chunkcachesize <= 0:
444 raise error.RevlogError(
444 raise error.RevlogError(
445 _(b'revlog chunk cache size %r is not greater than 0')
445 _(b'revlog chunk cache size %r is not greater than 0')
446 % self._chunkcachesize
446 % self._chunkcachesize
447 )
447 )
448 elif self._chunkcachesize & (self._chunkcachesize - 1):
448 elif self._chunkcachesize & (self._chunkcachesize - 1):
449 raise error.RevlogError(
449 raise error.RevlogError(
450 _(b'revlog chunk cache size %r is not a power of 2')
450 _(b'revlog chunk cache size %r is not a power of 2')
451 % self._chunkcachesize
451 % self._chunkcachesize
452 )
452 )
453 force_nodemap = opts.get(b'devel-force-nodemap', False)
453 force_nodemap = opts.get(b'devel-force-nodemap', False)
454 return new_header, mmapindexthreshold, force_nodemap
454 return new_header, mmapindexthreshold, force_nodemap
455
455
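The _chunkcachesize validation above relies on the classic bit trick that a positive integer is a power of two exactly when n & (n - 1) == 0, since a power of two has a single set bit that subtracting one clears. A standalone illustration (the helper name is hypothetical):

    def is_power_of_two(n):
        return n > 0 and (n & (n - 1)) == 0

    assert is_power_of_two(65536)       # the default chunk cache size
    assert not is_power_of_two(65535)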
456 def _get_data(self, filepath, mmap_threshold):
456 def _get_data(self, filepath, mmap_threshold, size=None):
457 """return a file content with or without mmap
457 """return a file content with or without mmap
458
458
459 If the file is missing return the empty string"""
459 If the file is missing return the empty string"""
460 try:
460 try:
461 with self.opener(filepath) as fp:
461 with self.opener(filepath) as fp:
462 if mmap_threshold is not None:
462 if mmap_threshold is not None:
463 file_size = self.opener.fstat(fp).st_size
463 file_size = self.opener.fstat(fp).st_size
464 if file_size >= mmap_threshold:
464 if file_size >= mmap_threshold:
465 if size is not None:
466 # avoid a potential mmap crash
467 size = min(file_size, size)
465 # TODO: we should .close() the mmap to release resources without
468 # TODO: we should .close() the mmap to release resources without
466 # relying on Python GC
469 # relying on Python GC
470 if size is None:
467 return util.buffer(util.mmapread(fp))
471 return util.buffer(util.mmapread(fp))
472 else:
473 return util.buffer(util.mmapread(fp, size))
474 if size is None:
468 return fp.read()
475 return fp.read()
476 else:
477 return fp.read(size)
469 except IOError as inst:
478 except IOError as inst:
470 if inst.errno != errno.ENOENT:
479 if inst.errno != errno.ENOENT:
471 raise
480 raise
472 return b''
481 return b''
473
482
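The new size parameter lets callers read only the first size bytes of a file and, in the mmap path, caps the mapping length at the physical file size so a shorter-than-expected file cannot crash the mapping. A minimal standalone sketch of the same pattern using plain built-in I/O instead of Mercurial's vfs layer (an assumption of this sketch):

    import errno
    import mmap
    import os

    def read_at_most(path, size=None, mmap_threshold=None):
        try:
            fp = open(path, 'rb')
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            return b''                      # a missing file reads as empty
        with fp:
            if mmap_threshold is not None:
                file_size = os.fstat(fp.fileno()).st_size
                if file_size >= mmap_threshold:
                    # never map past the physical end of the file
                    length = file_size if size is None else min(size, file_size)
                    m = mmap.mmap(fp.fileno(), length, access=mmap.ACCESS_READ)
                    return memoryview(m)
            return fp.read() if size is None else fp.read(size)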
474 def _loadindex(self):
483 def _loadindex(self):
475
484
476 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
485 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
477
486
478 if self.postfix is None:
487 if self.postfix is None:
479 entry_point = b'%s.i' % self.radix
488 entry_point = b'%s.i' % self.radix
480 else:
489 else:
481 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
490 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
482
491
483 entry_data = b''
492 entry_data = b''
484 self._initempty = True
493 self._initempty = True
485 entry_data = self._get_data(entry_point, mmapindexthreshold)
494 entry_data = self._get_data(entry_point, mmapindexthreshold)
486 if len(entry_data) > 0:
495 if len(entry_data) > 0:
487 header = INDEX_HEADER.unpack(entry_data[:4])[0]
496 header = INDEX_HEADER.unpack(entry_data[:4])[0]
488 self._initempty = False
497 self._initempty = False
489 else:
498 else:
490 header = new_header
499 header = new_header
491
500
492 self._format_flags = header & ~0xFFFF
501 self._format_flags = header & ~0xFFFF
493 self._format_version = header & 0xFFFF
502 self._format_version = header & 0xFFFF
494
503
495 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
504 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
496 if supported_flags is None:
505 if supported_flags is None:
497 msg = _(b'unknown version (%d) in revlog %s')
506 msg = _(b'unknown version (%d) in revlog %s')
498 msg %= (self._format_version, self.display_id)
507 msg %= (self._format_version, self.display_id)
499 raise error.RevlogError(msg)
508 raise error.RevlogError(msg)
500 elif self._format_flags & ~supported_flags:
509 elif self._format_flags & ~supported_flags:
501 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
510 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
502 display_flag = self._format_flags >> 16
511 display_flag = self._format_flags >> 16
503 msg %= (display_flag, self._format_version, self.display_id)
512 msg %= (display_flag, self._format_version, self.display_id)
504 raise error.RevlogError(msg)
513 raise error.RevlogError(msg)
505
514
506 features = FEATURES_BY_VERSION[self._format_version]
515 features = FEATURES_BY_VERSION[self._format_version]
507 self._inline = features[b'inline'](self._format_flags)
516 self._inline = features[b'inline'](self._format_flags)
508 self._generaldelta = features[b'generaldelta'](self._format_flags)
517 self._generaldelta = features[b'generaldelta'](self._format_flags)
509 self.hassidedata = features[b'sidedata']
518 self.hassidedata = features[b'sidedata']
510
519
511 if not features[b'docket']:
520 if not features[b'docket']:
512 self._indexfile = entry_point
521 self._indexfile = entry_point
513 index_data = entry_data
522 index_data = entry_data
514 else:
523 else:
515 self._docket_file = entry_point
524 self._docket_file = entry_point
516 if self._initempty:
525 if self._initempty:
517 self._docket = docketutil.default_docket(self, header)
526 self._docket = docketutil.default_docket(self, header)
518 else:
527 else:
519 self._docket = docketutil.parse_docket(self, entry_data)
528 self._docket = docketutil.parse_docket(self, entry_data)
520 self._indexfile = self._docket.index_filepath()
529 self._indexfile = self._docket.index_filepath()
521 index_data = self._get_data(self._indexfile, mmapindexthreshold)
530 index_data = b''
531 index_size = self._docket.index_end
532 if index_size > 0:
533 index_data = self._get_data(
534 self._indexfile, mmapindexthreshold, size=index_size
535 )
536 if len(index_data) < index_size:
537 msg = _(b'not enough index data for %s: got %d, expected %d')
538 msg %= (self.display_id, len(index_data), index_size)
539 raise error.RevlogError(msg)
540
522 self._inline = False
541 self._inline = False
523 # generaldelta implied by version 2 revlogs.
542 # generaldelta implied by version 2 revlogs.
524 self._generaldelta = True
543 self._generaldelta = True
525 # the logic for persistent nodemap will be dealt with within the
544 # the logic for persistent nodemap will be dealt with within the
526 # main docket, so disable it for now.
545 # main docket, so disable it for now.
527 self._nodemap_file = None
546 self._nodemap_file = None
528
547
529 if self.postfix is None or self.postfix == b'a':
548 if self.postfix is None or self.postfix == b'a':
530 self._datafile = b'%s.d' % self.radix
549 self._datafile = b'%s.d' % self.radix
531 else:
550 else:
532 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
551 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
533
552
534 self.nodeconstants = sha1nodeconstants
553 self.nodeconstants = sha1nodeconstants
535 self.nullid = self.nodeconstants.nullid
554 self.nullid = self.nodeconstants.nullid
536
555
537 # sparse-revlog can't be on without general-delta (issue6056)
556 # sparse-revlog can't be on without general-delta (issue6056)
538 if not self._generaldelta:
557 if not self._generaldelta:
539 self._sparserevlog = False
558 self._sparserevlog = False
540
559
541 self._storedeltachains = True
560 self._storedeltachains = True
542
561
543 devel_nodemap = (
562 devel_nodemap = (
544 self._nodemap_file
563 self._nodemap_file
545 and force_nodemap
564 and force_nodemap
546 and parse_index_v1_nodemap is not None
565 and parse_index_v1_nodemap is not None
547 )
566 )
548
567
549 use_rust_index = False
568 use_rust_index = False
550 if rustrevlog is not None:
569 if rustrevlog is not None:
551 if self._nodemap_file is not None:
570 if self._nodemap_file is not None:
552 use_rust_index = True
571 use_rust_index = True
553 else:
572 else:
554 use_rust_index = self.opener.options.get(b'rust.index')
573 use_rust_index = self.opener.options.get(b'rust.index')
555
574
556 self._parse_index = parse_index_v1
575 self._parse_index = parse_index_v1
557 if self._format_version == REVLOGV0:
576 if self._format_version == REVLOGV0:
558 self._parse_index = revlogv0.parse_index_v0
577 self._parse_index = revlogv0.parse_index_v0
559 elif self._format_version == REVLOGV2:
578 elif self._format_version == REVLOGV2:
560 self._parse_index = parse_index_v2
579 self._parse_index = parse_index_v2
561 elif devel_nodemap:
580 elif devel_nodemap:
562 self._parse_index = parse_index_v1_nodemap
581 self._parse_index = parse_index_v1_nodemap
563 elif use_rust_index:
582 elif use_rust_index:
564 self._parse_index = parse_index_v1_mixed
583 self._parse_index = parse_index_v1_mixed
565 try:
584 try:
566 d = self._parse_index(index_data, self._inline)
585 d = self._parse_index(index_data, self._inline)
567 index, _chunkcache = d
586 index, _chunkcache = d
568 use_nodemap = (
587 use_nodemap = (
569 not self._inline
588 not self._inline
570 and self._nodemap_file is not None
589 and self._nodemap_file is not None
571 and util.safehasattr(index, 'update_nodemap_data')
590 and util.safehasattr(index, 'update_nodemap_data')
572 )
591 )
573 if use_nodemap:
592 if use_nodemap:
574 nodemap_data = nodemaputil.persisted_data(self)
593 nodemap_data = nodemaputil.persisted_data(self)
575 if nodemap_data is not None:
594 if nodemap_data is not None:
576 docket = nodemap_data[0]
595 docket = nodemap_data[0]
577 if (
596 if (
578 len(d[0]) > docket.tip_rev
597 len(d[0]) > docket.tip_rev
579 and d[0][docket.tip_rev][7] == docket.tip_node
598 and d[0][docket.tip_rev][7] == docket.tip_node
580 ):
599 ):
581 # no changelog tampering
600 # no changelog tampering
582 self._nodemap_docket = docket
601 self._nodemap_docket = docket
583 index.update_nodemap_data(*nodemap_data)
602 index.update_nodemap_data(*nodemap_data)
584 except (ValueError, IndexError):
603 except (ValueError, IndexError):
585 raise error.RevlogError(
604 raise error.RevlogError(
586 _(b"index %s is corrupted") % self.display_id
605 _(b"index %s is corrupted") % self.display_id
587 )
606 )
588 self.index, self._chunkcache = d
607 self.index, self._chunkcache = d
589 if not self._chunkcache:
608 if not self._chunkcache:
590 self._chunkclear()
609 self._chunkclear()
591 # revnum -> (chain-length, sum-delta-length)
610 # revnum -> (chain-length, sum-delta-length)
592 self._chaininfocache = util.lrucachedict(500)
611 self._chaininfocache = util.lrucachedict(500)
593 # revlog header -> revlog compressor
612 # revlog header -> revlog compressor
594 self._decompressors = {}
613 self._decompressors = {}
595
614
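The docket-driven read added above is the core of this change: instead of trusting the index file's physical size, _loadindex reads exactly index_end bytes as recorded in the docket, fails loudly if fewer are available, and ignores any trailing bytes left behind by an interrupted transaction. A minimal standalone sketch of that contract (the docket object with an index_end attribute, and the read_at_most helper from the earlier sketch, are assumptions):

    def load_index_bytes(index_path, docket):
        data = b''
        expected = docket.index_end         # size recorded at the last transaction
        if expected > 0:
            data = read_at_most(index_path, size=expected)
            if len(data) < expected:
                raise ValueError(
                    'not enough index data: got %d, expected %d'
                    % (len(data), expected)
                )
        return data   # bytes past index_end, if any, are deliberately ignored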
596 @util.propertycache
615 @util.propertycache
597 def revlog_kind(self):
616 def revlog_kind(self):
598 return self.target[0]
617 return self.target[0]
599
618
600 @util.propertycache
619 @util.propertycache
601 def display_id(self):
620 def display_id(self):
602 """The public facing "ID" of the revlog that we use in message"""
621 """The public facing "ID" of the revlog that we use in message"""
603 # Maybe we should build a user facing representation of
622 # Maybe we should build a user facing representation of
604 # revlog.target instead of using `self.radix`
623 # revlog.target instead of using `self.radix`
605 return self.radix
624 return self.radix
606
625
607 @util.propertycache
626 @util.propertycache
608 def _compressor(self):
627 def _compressor(self):
609 engine = util.compengines[self._compengine]
628 engine = util.compengines[self._compengine]
610 return engine.revlogcompressor(self._compengineopts)
629 return engine.revlogcompressor(self._compengineopts)
611
630
612 def _indexfp(self):
631 def _indexfp(self):
613 """file object for the revlog's index file"""
632 """file object for the revlog's index file"""
614 return self.opener(self._indexfile, mode=b"r")
633 return self.opener(self._indexfile, mode=b"r")
615
634
616 def __index_write_fp(self):
635 def __index_write_fp(self):
617 # You should not use this directly; use `_writing` instead
636 # You should not use this directly; use `_writing` instead
618 try:
637 try:
619 f = self.opener(
638 f = self.opener(
620 self._indexfile, mode=b"r+", checkambig=self._checkambig
639 self._indexfile, mode=b"r+", checkambig=self._checkambig
621 )
640 )
641 if self._docket is None:
622 f.seek(0, os.SEEK_END)
642 f.seek(0, os.SEEK_END)
643 else:
644 f.seek(self._docket.index_end, os.SEEK_SET)
623 return f
645 return f
624 except IOError as inst:
646 except IOError as inst:
625 if inst.errno != errno.ENOENT:
647 if inst.errno != errno.ENOENT:
626 raise
648 raise
627 return self.opener(
649 return self.opener(
628 self._indexfile, mode=b"w+", checkambig=self._checkambig
650 self._indexfile, mode=b"w+", checkambig=self._checkambig
629 )
651 )
630
652
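Seeking to self._docket.index_end rather than SEEK_END is the write-side half of the same contract: if a previous transaction appended index bytes but aborted before the docket was updated, those trailing bytes are stale, and the next append simply overwrites them. A hypothetical sketch (assuming a docket object with a mutable index_end attribute):

    import os

    def append_index_entry(f, docket, entry):
        # trust the docket-recorded end, not the physical end of the file
        f.seek(docket.index_end, os.SEEK_SET)
        f.write(entry)
        docket.index_end += len(entry)      # the docket tracks the valid size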
631 def __index_new_fp(self):
653 def __index_new_fp(self):
632 # You should not use this unless you are upgrading from inline revlog
632 # You should not use this unless you are upgrading from an inline revlog
654 # You should not use this unless you are upgrading from an inline revlog
655 return self.opener(
634 self._indexfile,
656 self._indexfile,
635 mode=b"w",
657 mode=b"w",
636 checkambig=self._checkambig,
658 checkambig=self._checkambig,
637 atomictemp=True,
659 atomictemp=True,
638 )
660 )
639
661
640 def _datafp(self, mode=b'r'):
662 def _datafp(self, mode=b'r'):
641 """file object for the revlog's data file"""
663 """file object for the revlog's data file"""
642 return self.opener(self._datafile, mode=mode)
664 return self.opener(self._datafile, mode=mode)
643
665
644 @contextlib.contextmanager
666 @contextlib.contextmanager
645 def _datareadfp(self, existingfp=None):
667 def _datareadfp(self, existingfp=None):
646 """file object suitable to read data"""
668 """file object suitable to read data"""
647 # Use explicit file handle, if given.
669 # Use explicit file handle, if given.
648 if existingfp is not None:
670 if existingfp is not None:
649 yield existingfp
671 yield existingfp
650
672
651 # Use a file handle being actively used for writes, if available.
673 # Use a file handle being actively used for writes, if available.
652 # There is some danger to doing this because reads will seek the
674 # There is some danger to doing this because reads will seek the
653 # file. However, _writeentry() performs a SEEK_END before all writes,
675 # file. However, _writeentry() performs a SEEK_END before all writes,
654 # so we should be safe.
676 # so we should be safe.
655 elif self._writinghandles:
677 elif self._writinghandles:
656 if self._inline:
678 if self._inline:
657 yield self._writinghandles[0]
679 yield self._writinghandles[0]
658 else:
680 else:
659 yield self._writinghandles[1]
681 yield self._writinghandles[1]
660
682
661 # Otherwise open a new file handle.
683 # Otherwise open a new file handle.
662 else:
684 else:
663 if self._inline:
685 if self._inline:
664 func = self._indexfp
686 func = self._indexfp
665 else:
687 else:
666 func = self._datafp
688 func = self._datafp
667 with func() as fp:
689 with func() as fp:
668 yield fp
690 yield fp
669
691
670 def tiprev(self):
692 def tiprev(self):
671 return len(self.index) - 1
693 return len(self.index) - 1
672
694
673 def tip(self):
695 def tip(self):
674 return self.node(self.tiprev())
696 return self.node(self.tiprev())
675
697
676 def __contains__(self, rev):
698 def __contains__(self, rev):
677 return 0 <= rev < len(self)
699 return 0 <= rev < len(self)
678
700
679 def __len__(self):
701 def __len__(self):
680 return len(self.index)
702 return len(self.index)
681
703
682 def __iter__(self):
704 def __iter__(self):
683 return iter(pycompat.xrange(len(self)))
705 return iter(pycompat.xrange(len(self)))
684
706
685 def revs(self, start=0, stop=None):
707 def revs(self, start=0, stop=None):
686 """iterate over all rev in this revlog (from start to stop)"""
708 """iterate over all rev in this revlog (from start to stop)"""
687 return storageutil.iterrevs(len(self), start=start, stop=stop)
709 return storageutil.iterrevs(len(self), start=start, stop=stop)

    @property
    def nodemap(self):
        msg = (
            b"revlog.nodemap is deprecated, "
            b"use revlog.index.[has_node|rev|get_rev]"
        )
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self.index.nodemap

    @property
    def _nodecache(self):
        msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self.index.nodemap

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def candelta(self, baserev, rev):
        """whether two revisions (baserev, rev) can be delta-ed or not"""
        # Disable delta if either rev requires a content-changing flag
        # processor (ex. LFS). This is because such a flag processor can
        # alter the rawtext content that the delta will be based on, and two
        # clients could have the same revlog node with different flags (i.e.
        # different rawtext contents) and the delta could be incompatible.
        if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
            self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
        ):
            return False
        return True
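
    # Illustrative example (hypothetical revlog ``rl``): if rev 7 was stored
    # with a rawtext-changing flag such as REVIDX_EXTSTORED (e.g. an LFS
    # pointer), deltas touching it are refused because its rawtext is not
    # stable across clients:
    #
    #   rl.candelta(6, 7)  # -> False, rev 7 carries a rawtext-changing flag
    #   rl.candelta(5, 6)  # -> True, assuming neither rev carries such flags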

    def update_caches(self, transaction):
        if self._nodemap_file is not None:
            if transaction is None:
                nodemaputil.update_persistent_nodemap(self)
            else:
                nodemaputil.setup_persistent_nodemap(transaction, self)

    def clearcaches(self):
        self._revisioncache = None
        self._chainbasecache.clear()
        self._chunkcache = (0, b'')
        self._pcache = {}
        self._nodemap_docket = None
        self.index.clearcaches()
        # The Python code is the one responsible for validating the docket;
        # we end up having to refresh it here.
        use_nodemap = (
            not self._inline
            and self._nodemap_file is not None
            and util.safehasattr(self.index, 'update_nodemap_data')
        )
        if use_nodemap:
            nodemap_data = nodemaputil.persisted_data(self)
            if nodemap_data is not None:
                self._nodemap_docket = nodemap_data[0]
                self.index.update_nodemap_data(*nodemap_data)

    def rev(self, node):
        try:
            return self.index.rev(node)
        except TypeError:
            raise
        except error.RevlogError:
            # parsers.c radix tree lookup failed
            if (
                node == self.nodeconstants.wdirid
                or node in self.nodeconstants.wdirfilenodeids
            ):
                raise error.WdirUnsupported
            raise error.LookupError(node, self.display_id, _(b'no node'))

    # Accessors for index entries.

    # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
    # are flags.
    def start(self, rev):
        return int(self.index[rev][0] >> 16)

    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF
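
    # Worked example of the packing described above, with hypothetical
    # values: an index entry whose first field is ``(1024 << 16) | 0x8000``
    # decodes to ``start(rev) == 1024`` (the data offset) and
    # ``flags(rev) == 0x8000`` (the censored flag).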

    def length(self, rev):
        return self.index[rev][1]

    def sidedata_length(self, rev):
        if not self.hassidedata:
            return 0
        return self.index[rev][9]

    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.rawdata(rev)
        return len(t)

    def size(self, rev):
        """length of non-raw text (processed by a "read" flag processor)"""
        # fast path: if no "read" flag processor could change the content,
        # size is rawsize. note: ELLIPSIS is known to not change the content.
        flags = self.flags(rev)
        if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
            return self.rawsize(rev)

        return len(self.revision(rev, raw=False))

    def chainbase(self, rev):
        base = self._chainbasecache.get(rev)
        if base is not None:
            return base

        index = self.index
        iterrev = rev
        base = index[iterrev][3]
        while base != iterrev:
            iterrev = base
            base = index[iterrev][3]

        self._chainbasecache[rev] = base
        return base

    def linkrev(self, rev):
        return self.index[rev][4]

    def parentrevs(self, rev):
        try:
            entry = self.index[rev]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise
        if entry[5] == nullrev:
            return entry[6], entry[5]
        else:
            return entry[5], entry[6]
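
    # The swap above keeps a null parent in the second slot: a hypothetical
    # entry stored with parents (nullrev, 7) is reported as (7, nullrev), so
    # a revision with a single parent always exposes it as p1.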

    # fast parentrevs(rev) where rev isn't filtered
    _uncheckedparentrevs = parentrevs

    def node(self, rev):
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    # Derived from index values.

    def end(self, rev):
        return self.start(rev) + self.length(rev)

    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        # inline node() to avoid function call overhead
        if d[5] == nullrev:
            return i[d[6]][7], i[d[5]][7]
        else:
            return i[d[5]][7], i[d[6]][7]

    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r
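
    # Illustrative walk, assuming a hypothetical general-delta revlog where
    # rev 4 deltas against rev 2 and rev 2 against rev 0, a full snapshot
    # (a rev whose delta base is itself): chainlen(4) == 2, and the second
    # element of _chaininfo(4) sums the stored sizes of revs 4, 2 and 0,
    # since all three must be decompressed to rebuild rev 4.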

    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped
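
    # For the same hypothetical chain 0 <- 2 <- 4 used above:
    #
    #   rl._deltachain(4)             # -> ([0, 2, 4], False)
    #   rl._deltachain(4, stoprev=2)  # -> ([4], True); walk stopped at rev 2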

    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse revision order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        # first, make sure start revisions aren't filtered
        revs = list(revs)
        checkrev = self.node
        for r in revs:
            checkrev(r)
        # and we're sure ancestors aren't filtered as well

        if rustancestor is not None:
            lazyancestors = rustancestor.LazyAncestors
            arg = self.index
        else:
            lazyancestors = ancestor.lazyancestors
            arg = self._uncheckedparentrevs
        return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)

    def descendants(self, revs):
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

        ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [self.nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if r not in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]
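
    # Sketch on a hypothetical linear revlog 0-1-2-3-4: with
    # common=[node(1)] and heads=[node(4)], ``has`` lazily covers
    # {nullrev, 0, 1} and the missing list is [node(2), node(3), node(4)],
    # i.e. (::heads) - (::common) in revset terms.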

    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        if rustancestor is not None:
            return rustancestor.MissingAncestors(self.index, common)
        return ancestor.incrementalmissingancestors(self.parentrevs, common)

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [self.nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [self.nullid]  # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return (
                [self.node(r) for r in self],
                [self.nullid],
                list(self.heads()),
            )
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == self.nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n)  # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update(
                            [p for p in self.parents(n) if p != self.nullid]
                        )
                    elif n in heads:  # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [self.nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants. (We seeded the descendants list with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in pycompat.iteritems(heads) if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

    def headrevs(self, revs=None):
        if revs is None:
            try:
                return self.index.headrevs()
            except AttributeError:
                return self._headrevs()
        if rustdagop is not None:
            return rustdagop.headrevs(self.index, revs)
        return dagop.headrevs(revs, self._uncheckedparentrevs)

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iterate over filtered revs, so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1  # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0  # my parents are not
        return [r for r, val in enumerate(ishead) if val]
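
    # ``ishead`` has ``count + 1`` slots so that clearing a null parent
    # (``ishead[nullrev]``, i.e. ``ishead[-1]``) lands in the spare trailing
    # slot instead of clobbering the tip. E.g. a linear 0-1-2 history leaves
    # only ``ishead[2]`` set.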

    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [self.nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullrev
        else:
            start = self.rev(start)

        stoprevs = {self.rev(n) for n in stop or []}

        revs = dagop.headrevssubset(
            self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
        )

        return [self.node(rev) for rev in revs]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        ancs = self._commonancestorsheads(a, b)
        return pycompat.maplist(self.node, ancs)

    def _commonancestorsheads(self, *revs):
        """calculate all the heads of the common ancestors of revs"""
        try:
            ancs = self.index.commonancestorsheads(*revs)
        except (AttributeError, OverflowError):  # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
        return ancs

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        A revision is considered an ancestor of itself."""
        a, b = self.rev(a), self.rev(b)
        return self.isancestorrev(a, b)

    def isancestorrev(self, a, b):
        """return True if revision a is an ancestor of revision b

        A revision is considered an ancestor of itself.

        The implementation of this is trivial but the use of
        reachableroots is not."""
        if a == nullrev:
            return True
        elif a == b:
            return True
        elif a > b:
            return False
        return bool(self.reachableroots(a, [b], [a], includepath=False))
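
    # The ``a > b`` early exit above is sound because revlogs are
    # append-only and topologically ordered: an ancestor always has a
    # smaller revision number than any of its descendants.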

    def reachableroots(self, minroot, heads, roots, includepath=False):
        """return (heads(::(<roots> and <roots>::<heads>)))

        If includepath is True, return (<roots>::<heads>)."""
        try:
            return self.index.reachableroots2(
                minroot, heads, roots, includepath
            )
        except AttributeError:
            return dagop._reachablerootspure(
                self.parentrevs, minroot, roots, heads, includepath
            )

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return self.nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == self.nodeconstants.nodelen:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node)  # quick search the index
                return node
            except error.LookupError:
                pass  # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if b"%d" % rev != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 2 * self.nodeconstants.nodelen:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, error.LookupError):
                pass
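
    # _match() resolves, in order: an integer revision, a binary nodeid of
    # exactly ``nodelen`` bytes, a decimal revision string such as b"42" or
    # b"-1", and a full-length hex nodeid. Anything else falls through to
    # None so that lookup() can try prefix matching via _partialmatch().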

    def _partialmatch(self, id):
        # we don't care about wdirfilenodeids as they should always be full
        # hashes
        maybewdir = self.nodeconstants.wdirhex.startswith(id)
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise error.RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise error.AmbiguousPrefixLookupError(
                    id, self.display_id, _(b'ambiguous identifier')
                )
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2  # grab an even number of digits
                prefix = bin(id[: l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [
                    n for n in nl if hex(n).startswith(id) and self.hasnode(n)
                ]
                if self.nodeconstants.nullhex.startswith(id):
                    nl.append(self.nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.display_id, _(b'ambiguous identifier')
                    )
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
        - revision number or str(revision number)
        - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise error.LookupError(id, self.display_id, _(b'no match found'))

    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""

        def isvalid(prefix):
            try:
                matchednode = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if matchednode is None:
                raise error.LookupError(node, self.display_id, _(b'no node'))
            return True

        def maybewdir(prefix):
            return all(c == b'f' for c in pycompat.iterbytestr(prefix))

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != self.nodeconstants.wdirid:
                    raise error.LookupError(
                        node, self.display_id, _(b'no node')
                    )
            except AttributeError:
                # Fall through to pure code
                pass

        if node == self.nodeconstants.wdirid:
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)
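
    # E.g. if a hypothetical node deadbeef... shares only its first two hex
    # digits with some other node, the shortest unambiguous prefix is
    # b'dea'; prefixes made up entirely of 'f' are lengthened further so
    # they cannot be confused with the wdir pseudo-node.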

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """
        p1, p2 = self.parents(node)
        return storageutil.hashrevisionsha1(text, p1, p2) != node

    def _cachesegment(self, offset, data):
        """Add a segment to the revlog cache.

        Accepts an absolute offset and the data that is at that location.
        """
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data
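
    # E.g. if (0, <32 KiB of data>) is cached, caching the segment that
    # starts at byte 32768 simply extends the cached window; a
    # non-contiguous segment (or one that would overflow _chunksize)
    # replaces the cache instead.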

    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.

        Raises if the requested number of bytes could not be read.
        """
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (
            (offset + length + cachesize) & ~(cachesize - 1)
        ) - realoffset
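        # Worked example, assuming the default 64 KiB window (cachesize ==
        # 65536): a request for offset=70000, length=100 aligns down to
        # realoffset=65536 and reads reallength=65536 bytes, so the whole
        # surrounding window ends up cached.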
        with self._datareadfp(df) as df:
            df.seek(realoffset)
            d = df.read(reallength)

        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            startoffset = offset - realoffset
            if len(d) - startoffset < length:
                raise error.RevlogError(
                    _(
                        b'partial read of revlog %s; expected %d bytes from '
                        b'offset %d, got %d'
                    )
                    % (
                        self._indexfile if self._inline else self._datafile,
                        length,
                        offset,
                        len(d) - startoffset,
                    )
                )

            return util.buffer(d, startoffset, length)

        if len(d) < length:
            raise error.RevlogError(
                _(
                    b'partial read of revlog %s; expected %d bytes from offset '
                    b'%d, got %d'
                )
                % (
                    self._indexfile if self._inline else self._datafile,
                    length,
                    offset,
                    len(d),
                )
            )

        return d

1606 def _getsegment(self, offset, length, df=None):
1628 def _getsegment(self, offset, length, df=None):
1607 """Obtain a segment of raw data from the revlog.
1629 """Obtain a segment of raw data from the revlog.
1608
1630
1609 Accepts an absolute offset, length of bytes to obtain, and an
1631 Accepts an absolute offset, length of bytes to obtain, and an
1610 optional file handle to the already-opened revlog. If the file
1632 optional file handle to the already-opened revlog. If the file
1611 handle is used, its original seek position will not be preserved.
1633 handle is used, its original seek position will not be preserved.
1612
1634
1613 Requests for data may be returned from a cache.
1635 Requests for data may be returned from a cache.
1614
1636
1615 Returns a str or a buffer instance of raw byte data.
1637 Returns a str or a buffer instance of raw byte data.
1616 """
1638 """
1617 o, d = self._chunkcache
1639 o, d = self._chunkcache
1618 l = len(d)
1640 l = len(d)
1619
1641
1620 # is it in the cache?
1642 # is it in the cache?
1621 cachestart = offset - o
1643 cachestart = offset - o
1622 cacheend = cachestart + length
1644 cacheend = cachestart + length
1623 if cachestart >= 0 and cacheend <= l:
1645 if cachestart >= 0 and cacheend <= l:
1624 if cachestart == 0 and cacheend == l:
1646 if cachestart == 0 and cacheend == l:
1625 return d # avoid a copy
1647 return d # avoid a copy
1626 return util.buffer(d, cachestart, cacheend - cachestart)
1648 return util.buffer(d, cachestart, cacheend - cachestart)
1627
1649
1628 return self._readsegment(offset, length, df=df)
1650 return self._readsegment(offset, length, df=df)
1629
1651
1630 def _getsegmentforrevs(self, startrev, endrev, df=None):
1652 def _getsegmentforrevs(self, startrev, endrev, df=None):
1631 """Obtain a segment of raw data corresponding to a range of revisions.
1653 """Obtain a segment of raw data corresponding to a range of revisions.
1632
1654
1633 Accepts the start and end revisions and an optional already-open
1655 Accepts the start and end revisions and an optional already-open
1634 file handle to be used for reading. If the file handle is used, its
1656 file handle to be used for reading. If the file handle is used, its
1635 seek position will not be preserved.
1657 seek position will not be preserved.
1636
1658
1637 Requests for data may be satisfied by a cache.
1659 Requests for data may be satisfied by a cache.
1638
1660
1639 Returns a 2-tuple of (offset, data) for the requested range of
1661 Returns a 2-tuple of (offset, data) for the requested range of
1640 revisions. Offset is the integer offset from the beginning of the
1662 revisions. Offset is the integer offset from the beginning of the
1641 revlog and data is a str or buffer of the raw byte data.
1663 revlog and data is a str or buffer of the raw byte data.
1642
1664
1643 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1665 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1644 to determine where each revision's data begins and ends.
1666 to determine where each revision's data begins and ends.
1645 """
1667 """
1646 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1668 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1647 # (functions are expensive).
1669 # (functions are expensive).
1648 index = self.index
1670 index = self.index
1649 istart = index[startrev]
1671 istart = index[startrev]
1650 start = int(istart[0] >> 16)
1672 start = int(istart[0] >> 16)
1651 if startrev == endrev:
1673 if startrev == endrev:
1652 end = start + istart[1]
1674 end = start + istart[1]
1653 else:
1675 else:
1654 iend = index[endrev]
1676 iend = index[endrev]
1655 end = int(iend[0] >> 16) + iend[1]
1677 end = int(iend[0] >> 16) + iend[1]
1656
1678
1657 if self._inline:
1679 if self._inline:
1658 start += (startrev + 1) * self.index.entry_size
1680 start += (startrev + 1) * self.index.entry_size
1659 end += (endrev + 1) * self.index.entry_size
1681 end += (endrev + 1) * self.index.entry_size
1660 length = end - start
1682 length = end - start
1661
1683
1662 return start, self._getsegment(start, length, df=df)
1684 return start, self._getsegment(start, length, df=df)
1663
1685
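# Hedged restatement of the inline adjustment above: in an inline revlog
# the data for revision r is stored after r + 1 index entries, so the
# logical data offsets must be shifted by that many entry_size bytes.
# All parameters here are stand-ins for the index attributes used above.
def physical_span(start, end, startrev, endrev, entry_size, inline):
    if inline:
        start += (startrev + 1) * entry_size
        end += (endrev + 1) * entry_size
    return start, end - start  # physical offset and length to read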
1664 def _chunk(self, rev, df=None):
1686 def _chunk(self, rev, df=None):
1665 """Obtain a single decompressed chunk for a revision.
1687 """Obtain a single decompressed chunk for a revision.
1666
1688
1667 Accepts an integer revision and an optional already-open file handle
1689 Accepts an integer revision and an optional already-open file handle
1668 to be used for reading. If used, the seek position of the file will not
1690 to be used for reading. If used, the seek position of the file will not
1669 be preserved.
1691 be preserved.
1670
1692
1671 Returns a str holding uncompressed data for the requested revision.
1693 Returns a str holding uncompressed data for the requested revision.
1672 """
1694 """
1673 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1695 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1674
1696
1675 def _chunks(self, revs, df=None, targetsize=None):
1697 def _chunks(self, revs, df=None, targetsize=None):
1676 """Obtain decompressed chunks for the specified revisions.
1698 """Obtain decompressed chunks for the specified revisions.
1677
1699
1678 Accepts an iterable of numeric revisions that are assumed to be in
1700 Accepts an iterable of numeric revisions that are assumed to be in
1679 ascending order. Also accepts an optional already-open file handle
1701 ascending order. Also accepts an optional already-open file handle
1680 to be used for reading. If used, the seek position of the file will
1702 to be used for reading. If used, the seek position of the file will
1681 not be preserved.
1703 not be preserved.
1682
1704
1683 This function is similar to calling ``self._chunk()`` multiple times,
1705 This function is similar to calling ``self._chunk()`` multiple times,
1684 but is faster.
1706 but is faster.
1685
1707
1686 Returns a list with decompressed data for each requested revision.
1708 Returns a list with decompressed data for each requested revision.
1687 """
1709 """
1688 if not revs:
1710 if not revs:
1689 return []
1711 return []
1690 start = self.start
1712 start = self.start
1691 length = self.length
1713 length = self.length
1692 inline = self._inline
1714 inline = self._inline
1693 iosize = self.index.entry_size
1715 iosize = self.index.entry_size
1694 buffer = util.buffer
1716 buffer = util.buffer
1695
1717
1696 l = []
1718 l = []
1697 ladd = l.append
1719 ladd = l.append
1698
1720
1699 if not self._withsparseread:
1721 if not self._withsparseread:
1700 slicedchunks = (revs,)
1722 slicedchunks = (revs,)
1701 else:
1723 else:
1702 slicedchunks = deltautil.slicechunk(
1724 slicedchunks = deltautil.slicechunk(
1703 self, revs, targetsize=targetsize
1725 self, revs, targetsize=targetsize
1704 )
1726 )
1705
1727
1706 for revschunk in slicedchunks:
1728 for revschunk in slicedchunks:
1707 firstrev = revschunk[0]
1729 firstrev = revschunk[0]
1708 # Skip trailing revisions with empty diff
1730 # Skip trailing revisions with empty diff
1709 for lastrev in revschunk[::-1]:
1731 for lastrev in revschunk[::-1]:
1710 if length(lastrev) != 0:
1732 if length(lastrev) != 0:
1711 break
1733 break
1712
1734
1713 try:
1735 try:
1714 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1736 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1715 except OverflowError:
1737 except OverflowError:
1716 # issue4215 - we can't cache a run of chunks greater than
1738 # issue4215 - we can't cache a run of chunks greater than
1717 # 2G on Windows
1739 # 2G on Windows
1718 return [self._chunk(rev, df=df) for rev in revschunk]
1740 return [self._chunk(rev, df=df) for rev in revschunk]
1719
1741
1720 decomp = self.decompress
1742 decomp = self.decompress
1721 for rev in revschunk:
1743 for rev in revschunk:
1722 chunkstart = start(rev)
1744 chunkstart = start(rev)
1723 if inline:
1745 if inline:
1724 chunkstart += (rev + 1) * iosize
1746 chunkstart += (rev + 1) * iosize
1725 chunklength = length(rev)
1747 chunklength = length(rev)
1726 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1748 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1727
1749
1728 return l
1750 return l
1729
1751
1730 def _chunkclear(self):
1752 def _chunkclear(self):
1731 """Clear the raw chunk cache."""
1753 """Clear the raw chunk cache."""
1732 self._chunkcache = (0, b'')
1754 self._chunkcache = (0, b'')
1733
1755
1734 def deltaparent(self, rev):
1756 def deltaparent(self, rev):
1735 """return deltaparent of the given revision"""
1757 """return deltaparent of the given revision"""
1736 base = self.index[rev][3]
1758 base = self.index[rev][3]
1737 if base == rev:
1759 if base == rev:
1738 return nullrev
1760 return nullrev
1739 elif self._generaldelta:
1761 elif self._generaldelta:
1740 return base
1762 return base
1741 else:
1763 else:
1742 return rev - 1
1764 return rev - 1
1743
1765
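# The two delta-parent conventions handled above, as a standalone sketch
# (base is entry[3] from the index; names are illustrative):
def delta_parent(base, rev, generaldelta, nullrev=-1):
    if base == rev:
        return nullrev      # stored as a full text, no delta parent
    if generaldelta:
        return base         # general delta: base names the parent rev
    return rev - 1          # legacy layout: always delta against rev - 1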
1744 def issnapshot(self, rev):
1766 def issnapshot(self, rev):
1745 """tells whether rev is a snapshot"""
1767 """tells whether rev is a snapshot"""
1746 if not self._sparserevlog:
1768 if not self._sparserevlog:
1747 return self.deltaparent(rev) == nullrev
1769 return self.deltaparent(rev) == nullrev
1748 elif util.safehasattr(self.index, b'issnapshot'):
1770 elif util.safehasattr(self.index, b'issnapshot'):
1749 # directly assign the method to cache the testing and access
1771 # directly assign the method to cache the testing and access
1750 self.issnapshot = self.index.issnapshot
1772 self.issnapshot = self.index.issnapshot
1751 return self.issnapshot(rev)
1773 return self.issnapshot(rev)
1752 if rev == nullrev:
1774 if rev == nullrev:
1753 return True
1775 return True
1754 entry = self.index[rev]
1776 entry = self.index[rev]
1755 base = entry[3]
1777 base = entry[3]
1756 if base == rev:
1778 if base == rev:
1757 return True
1779 return True
1758 if base == nullrev:
1780 if base == nullrev:
1759 return True
1781 return True
1760 p1 = entry[5]
1782 p1 = entry[5]
1761 p2 = entry[6]
1783 p2 = entry[6]
1762 if base == p1 or base == p2:
1784 if base == p1 or base == p2:
1763 return False
1785 return False
1764 return self.issnapshot(base)
1786 return self.issnapshot(base)
1765
1787
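# A compact restatement of the sparse-revlog snapshot rule above,
# assuming entry(rev) yields (base, p1, p2) from the index tuple:
def is_snapshot(entry, rev, nullrev=-1):
    if rev == nullrev:
        return True
    base, p1, p2 = entry(rev)
    if base == rev or base == nullrev:
        return True                      # full text or delta against null
    if base == p1 or base == p2:
        return False                     # plain delta against a parent
    return is_snapshot(entry, base)      # chain of intermediate snapshots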
1766 def snapshotdepth(self, rev):
1788 def snapshotdepth(self, rev):
1767 """number of snapshot in the chain before this one"""
1789 """number of snapshot in the chain before this one"""
1768 if not self.issnapshot(rev):
1790 if not self.issnapshot(rev):
1769 raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
1791 raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
1770 return len(self._deltachain(rev)[0]) - 1
1792 return len(self._deltachain(rev)[0]) - 1
1771
1793
1772 def revdiff(self, rev1, rev2):
1794 def revdiff(self, rev1, rev2):
1773 """return or calculate a delta between two revisions
1795 """return or calculate a delta between two revisions
1774
1796
1775 The delta calculated is in binary form and is intended to be written to
1797 The delta calculated is in binary form and is intended to be written to
1776 revlog data directly. So this function needs raw revision data.
1798 revlog data directly. So this function needs raw revision data.
1777 """
1799 """
1778 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1800 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1779 return bytes(self._chunk(rev2))
1801 return bytes(self._chunk(rev2))
1780
1802
1781 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1803 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1782
1804
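# The fast path above in isolation (a sketch, not revlog API): when
# rev2's stored delta is already computed against rev1, the raw chunk is
# the requested diff and nothing needs to be recomputed; textdiff is a
# stand-in for mdiff.textdiff.
def revdiff_sketch(rl, rev1, rev2, textdiff, nullrev=-1):
    if rev1 != nullrev and rl.deltaparent(rev2) == rev1:
        return bytes(rl._chunk(rev2))                     # reuse the stored delta
    return textdiff(rl.rawdata(rev1), rl.rawdata(rev2))   # recompute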
1783 def _processflags(self, text, flags, operation, raw=False):
1805 def _processflags(self, text, flags, operation, raw=False):
1784 """deprecated entry point to access flag processors"""
1806 """deprecated entry point to access flag processors"""
1785 msg = b'_processflags(...) use the specialized variant'
1807 msg = b'_processflags(...) use the specialized variant'
1786 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1808 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1787 if raw:
1809 if raw:
1788 return text, flagutil.processflagsraw(self, text, flags)
1810 return text, flagutil.processflagsraw(self, text, flags)
1789 elif operation == b'read':
1811 elif operation == b'read':
1790 return flagutil.processflagsread(self, text, flags)
1812 return flagutil.processflagsread(self, text, flags)
1791 else: # write operation
1813 else: # write operation
1792 return flagutil.processflagswrite(self, text, flags)
1814 return flagutil.processflagswrite(self, text, flags)
1793
1815
1794 def revision(self, nodeorrev, _df=None, raw=False):
1816 def revision(self, nodeorrev, _df=None, raw=False):
1795 """return an uncompressed revision of a given node or revision
1817 """return an uncompressed revision of a given node or revision
1796 number.
1818 number.
1797
1819
1798 _df - an existing file handle to read from. (internal-only)
1820 _df - an existing file handle to read from. (internal-only)
1799 raw - an optional argument specifying if the revision data is to be
1821 raw - an optional argument specifying if the revision data is to be
1800 treated as raw data when applying flag transforms. 'raw' should be set
1822 treated as raw data when applying flag transforms. 'raw' should be set
1801 to True when generating changegroups or in debug commands.
1823 to True when generating changegroups or in debug commands.
1802 """
1824 """
1803 if raw:
1825 if raw:
1804 msg = (
1826 msg = (
1805 b'revlog.revision(..., raw=True) is deprecated, '
1827 b'revlog.revision(..., raw=True) is deprecated, '
1806 b'use revlog.rawdata(...)'
1828 b'use revlog.rawdata(...)'
1807 )
1829 )
1808 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1830 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1809 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1831 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1810
1832
1811 def sidedata(self, nodeorrev, _df=None):
1833 def sidedata(self, nodeorrev, _df=None):
1812 """a map of extra data related to the changeset but not part of the hash
1834 """a map of extra data related to the changeset but not part of the hash
1813
1835
1814 This function currently returns a dictionary. However, a more
1836 This function currently returns a dictionary. However, a more
1815 advanced mapping object will likely be used in the future to make
1837 advanced mapping object will likely be used in the future to make
1816 the code more efficient/lazy.
1838 the code more efficient/lazy.
1817 """
1839 """
1818 return self._revisiondata(nodeorrev, _df)[1]
1840 return self._revisiondata(nodeorrev, _df)[1]
1819
1841
1820 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1842 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1821 # deal with <nodeorrev> argument type
1843 # deal with <nodeorrev> argument type
1822 if isinstance(nodeorrev, int):
1844 if isinstance(nodeorrev, int):
1823 rev = nodeorrev
1845 rev = nodeorrev
1824 node = self.node(rev)
1846 node = self.node(rev)
1825 else:
1847 else:
1826 node = nodeorrev
1848 node = nodeorrev
1827 rev = None
1849 rev = None
1828
1850
1829 # fast path the special `nullid` rev
1851 # fast path the special `nullid` rev
1830 if node == self.nullid:
1852 if node == self.nullid:
1831 return b"", {}
1853 return b"", {}
1832
1854
1833 # ``rawtext`` is the text as stored inside the revlog. Might be the
1855 # ``rawtext`` is the text as stored inside the revlog. Might be the
1834 # revision or might need to be processed to retrieve the revision.
1856 # revision or might need to be processed to retrieve the revision.
1835 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1857 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1836
1858
1837 if self.hassidedata:
1859 if self.hassidedata:
1838 if rev is None:
1860 if rev is None:
1839 rev = self.rev(node)
1861 rev = self.rev(node)
1840 sidedata = self._sidedata(rev)
1862 sidedata = self._sidedata(rev)
1841 else:
1863 else:
1842 sidedata = {}
1864 sidedata = {}
1843
1865
1844 if raw and validated:
1866 if raw and validated:
1845 # if we don't want to process the raw text and the raw
1867 # if we don't want to process the raw text and the raw
1846 # text is cached, we can exit early.
1868 # text is cached, we can exit early.
1847 return rawtext, sidedata
1869 return rawtext, sidedata
1848 if rev is None:
1870 if rev is None:
1849 rev = self.rev(node)
1871 rev = self.rev(node)
1850 # the revlog's flags for this revision
1872 # the revlog's flags for this revision
1851 # (they usually alter its state or content)
1873 # (they usually alter its state or content)
1852 flags = self.flags(rev)
1874 flags = self.flags(rev)
1853
1875
1854 if validated and flags == REVIDX_DEFAULT_FLAGS:
1876 if validated and flags == REVIDX_DEFAULT_FLAGS:
1855 # no extra flags set, no flag processor runs, text = rawtext
1877 # no extra flags set, no flag processor runs, text = rawtext
1856 return rawtext, sidedata
1878 return rawtext, sidedata
1857
1879
1858 if raw:
1880 if raw:
1859 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1881 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1860 text = rawtext
1882 text = rawtext
1861 else:
1883 else:
1862 r = flagutil.processflagsread(self, rawtext, flags)
1884 r = flagutil.processflagsread(self, rawtext, flags)
1863 text, validatehash = r
1885 text, validatehash = r
1864 if validatehash:
1886 if validatehash:
1865 self.checkhash(text, node, rev=rev)
1887 self.checkhash(text, node, rev=rev)
1866 if not validated:
1888 if not validated:
1867 self._revisioncache = (node, rev, rawtext)
1889 self._revisioncache = (node, rev, rawtext)
1868
1890
1869 return text, sidedata
1891 return text, sidedata
1870
1892
1871 def _rawtext(self, node, rev, _df=None):
1893 def _rawtext(self, node, rev, _df=None):
1872 """return the possibly unvalidated rawtext for a revision
1894 """return the possibly unvalidated rawtext for a revision
1873
1895
1874 returns (rev, rawtext, validated)
1896 returns (rev, rawtext, validated)
1875 """
1897 """
1876
1898
1877 # revision in the cache (could be useful to apply delta)
1899 # revision in the cache (could be useful to apply delta)
1878 cachedrev = None
1900 cachedrev = None
1879 # An intermediate text to apply deltas to
1901 # An intermediate text to apply deltas to
1880 basetext = None
1902 basetext = None
1881
1903
1882 # Check if we have the entry in cache
1904 # Check if we have the entry in cache
1883 # The cache entry looks like (node, rev, rawtext)
1905 # The cache entry looks like (node, rev, rawtext)
1884 if self._revisioncache:
1906 if self._revisioncache:
1885 if self._revisioncache[0] == node:
1907 if self._revisioncache[0] == node:
1886 return (rev, self._revisioncache[2], True)
1908 return (rev, self._revisioncache[2], True)
1887 cachedrev = self._revisioncache[1]
1909 cachedrev = self._revisioncache[1]
1888
1910
1889 if rev is None:
1911 if rev is None:
1890 rev = self.rev(node)
1912 rev = self.rev(node)
1891
1913
1892 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1914 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1893 if stopped:
1915 if stopped:
1894 basetext = self._revisioncache[2]
1916 basetext = self._revisioncache[2]
1895
1917
1896 # drop cache to save memory, the caller is expected to
1918 # drop cache to save memory, the caller is expected to
1897 # update self._revisioncache after validating the text
1919 # update self._revisioncache after validating the text
1898 self._revisioncache = None
1920 self._revisioncache = None
1899
1921
1900 targetsize = None
1922 targetsize = None
1901 rawsize = self.index[rev][2]
1923 rawsize = self.index[rev][2]
1902 if 0 <= rawsize:
1924 if 0 <= rawsize:
1903 targetsize = 4 * rawsize
1925 targetsize = 4 * rawsize
1904
1926
1905 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1927 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1906 if basetext is None:
1928 if basetext is None:
1907 basetext = bytes(bins[0])
1929 basetext = bytes(bins[0])
1908 bins = bins[1:]
1930 bins = bins[1:]
1909
1931
1910 rawtext = mdiff.patches(basetext, bins)
1932 rawtext = mdiff.patches(basetext, bins)
1911 del basetext # let us have a chance to free memory early
1933 del basetext # let us have a chance to free memory early
1912 return (rev, rawtext, False)
1934 return (rev, rawtext, False)
1913
1935
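# A minimal model of the chain replay in _rawtext above: bins[0] is the
# snapshot text and the rest are binary deltas applied in order
# (mdiff.patches performs this in one optimized pass; apply_delta is a
# hypothetical stand-in for a single-delta patcher).
def rebuild_rawtext(bins, apply_delta):
    text = bytes(bins[0])
    for delta in bins[1:]:
        text = apply_delta(text, delta)
    return text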
1914 def _sidedata(self, rev):
1936 def _sidedata(self, rev):
1915 """Return the sidedata for a given revision number."""
1937 """Return the sidedata for a given revision number."""
1916 index_entry = self.index[rev]
1938 index_entry = self.index[rev]
1917 sidedata_offset = index_entry[8]
1939 sidedata_offset = index_entry[8]
1918 sidedata_size = index_entry[9]
1940 sidedata_size = index_entry[9]
1919
1941
1920 if self._inline:
1942 if self._inline:
1921 sidedata_offset += self.index.entry_size * (1 + rev)
1943 sidedata_offset += self.index.entry_size * (1 + rev)
1922 if sidedata_size == 0:
1944 if sidedata_size == 0:
1923 return {}
1945 return {}
1924
1946
1925 segment = self._getsegment(sidedata_offset, sidedata_size)
1947 segment = self._getsegment(sidedata_offset, sidedata_size)
1926 sidedata = sidedatautil.deserialize_sidedata(segment)
1948 sidedata = sidedatautil.deserialize_sidedata(segment)
1927 return sidedata
1949 return sidedata
1928
1950
1929 def rawdata(self, nodeorrev, _df=None):
1951 def rawdata(self, nodeorrev, _df=None):
1930 """return an uncompressed raw data of a given node or revision number.
1952 """return an uncompressed raw data of a given node or revision number.
1931
1953
1932 _df - an existing file handle to read from. (internal-only)
1954 _df - an existing file handle to read from. (internal-only)
1933 """
1955 """
1934 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1956 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1935
1957
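# Hedged usage contrast for the two accessors above, as a tiny helper
# (rl is assumed to be an open revlog, node a known node):
def both_forms(rl, node):
    text = rl.revision(node)  # flag processors applied, hash-checked
    raw = rl.rawdata(node)    # stored bytes, what changegroups exchange
    return text, raw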
1936 def hash(self, text, p1, p2):
1958 def hash(self, text, p1, p2):
1937 """Compute a node hash.
1959 """Compute a node hash.
1938
1960
1939 Available as a function so that subclasses can replace the hash
1961 Available as a function so that subclasses can replace the hash
1940 as needed.
1962 as needed.
1941 """
1963 """
1942 return storageutil.hashrevisionsha1(text, p1, p2)
1964 return storageutil.hashrevisionsha1(text, p1, p2)
1943
1965
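# A sketch of what storageutil.hashrevisionsha1 computes, assuming the
# conventional SHA-1 node scheme: the parents are hashed in sorted
# order, followed by the revision text.
import hashlib

def sha1_node(text, p1, p2):
    s = hashlib.sha1(min(p1, p2))
    s.update(max(p1, p2))
    s.update(text)
    return s.digest()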
1944 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1966 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1945 """Check node hash integrity.
1967 """Check node hash integrity.
1946
1968
1947 Available as a function so that subclasses can extend hash mismatch
1969 Available as a function so that subclasses can extend hash mismatch
1948 behaviors as needed.
1970 behaviors as needed.
1949 """
1971 """
1950 try:
1972 try:
1951 if p1 is None and p2 is None:
1973 if p1 is None and p2 is None:
1952 p1, p2 = self.parents(node)
1974 p1, p2 = self.parents(node)
1953 if node != self.hash(text, p1, p2):
1975 if node != self.hash(text, p1, p2):
1954 # Clear the revision cache on hash failure. The revision cache
1976 # Clear the revision cache on hash failure. The revision cache
1955 # only stores the raw revision and clearing the cache does have
1977 # only stores the raw revision and clearing the cache does have
1956 # the side-effect that we won't have a cache hit when the raw
1978 # the side-effect that we won't have a cache hit when the raw
1957 # revision data is accessed. But this case should be rare and
1979 # revision data is accessed. But this case should be rare and
1958 # it is extra work to teach the cache about the hash
1980 # it is extra work to teach the cache about the hash
1959 # verification state.
1981 # verification state.
1960 if self._revisioncache and self._revisioncache[0] == node:
1982 if self._revisioncache and self._revisioncache[0] == node:
1961 self._revisioncache = None
1983 self._revisioncache = None
1962
1984
1963 revornode = rev
1985 revornode = rev
1964 if revornode is None:
1986 if revornode is None:
1965 revornode = templatefilters.short(hex(node))
1987 revornode = templatefilters.short(hex(node))
1966 raise error.RevlogError(
1988 raise error.RevlogError(
1967 _(b"integrity check failed on %s:%s")
1989 _(b"integrity check failed on %s:%s")
1968 % (self.display_id, pycompat.bytestr(revornode))
1990 % (self.display_id, pycompat.bytestr(revornode))
1969 )
1991 )
1970 except error.RevlogError:
1992 except error.RevlogError:
1971 if self._censorable and storageutil.iscensoredtext(text):
1993 if self._censorable and storageutil.iscensoredtext(text):
1972 raise error.CensoredNodeError(self.display_id, node, text)
1994 raise error.CensoredNodeError(self.display_id, node, text)
1973 raise
1995 raise
1974
1996
1975 def _enforceinlinesize(self, tr):
1997 def _enforceinlinesize(self, tr):
1976 """Check if the revlog is too big for inline and convert if so.
1998 """Check if the revlog is too big for inline and convert if so.
1977
1999
1978 This should be called after revisions are added to the revlog. If the
2000 This should be called after revisions are added to the revlog. If the
1979 revlog has grown too large to be an inline revlog, it will convert it
2001 revlog has grown too large to be an inline revlog, it will convert it
1980 to use multiple index and data files.
2002 to use multiple index and data files.
1981 """
2003 """
1982 tiprev = len(self) - 1
2004 tiprev = len(self) - 1
1983 total_size = self.start(tiprev) + self.length(tiprev)
2005 total_size = self.start(tiprev) + self.length(tiprev)
1984 if not self._inline or total_size < _maxinline:
2006 if not self._inline or total_size < _maxinline:
1985 return
2007 return
1986
2008
1987 troffset = tr.findoffset(self._indexfile)
2009 troffset = tr.findoffset(self._indexfile)
1988 if troffset is None:
2010 if troffset is None:
1989 raise error.RevlogError(
2011 raise error.RevlogError(
1990 _(b"%s not found in the transaction") % self._indexfile
2012 _(b"%s not found in the transaction") % self._indexfile
1991 )
2013 )
1992 trindex = 0
2014 trindex = 0
1993 tr.add(self._datafile, 0)
2015 tr.add(self._datafile, 0)
1994
2016
1995 existing_handles = False
2017 existing_handles = False
1996 if self._writinghandles is not None:
2018 if self._writinghandles is not None:
1997 existing_handles = True
2019 existing_handles = True
1998 fp = self._writinghandles[0]
2020 fp = self._writinghandles[0]
1999 fp.flush()
2021 fp.flush()
2000 fp.close()
2022 fp.close()
2001 # We can't use the cached file handle after close(). So prevent
2023 # We can't use the cached file handle after close(). So prevent
2002 # its usage.
2024 # its usage.
2003 self._writinghandles = None
2025 self._writinghandles = None
2004
2026
2005 new_dfh = self._datafp(b'w+')
2027 new_dfh = self._datafp(b'w+')
2006 new_dfh.truncate(0) # drop any potentially existing data
2028 new_dfh.truncate(0) # drop any potentially existing data
2007 try:
2029 try:
2008 with self._indexfp() as read_ifh:
2030 with self._indexfp() as read_ifh:
2009 for r in self:
2031 for r in self:
2010 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2032 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2011 if troffset <= self.start(r):
2033 if troffset <= self.start(r):
2012 trindex = r
2034 trindex = r
2013 new_dfh.flush()
2035 new_dfh.flush()
2014
2036
2015 with self.__index_new_fp() as fp:
2037 with self.__index_new_fp() as fp:
2016 self._format_flags &= ~FLAG_INLINE_DATA
2038 self._format_flags &= ~FLAG_INLINE_DATA
2017 self._inline = False
2039 self._inline = False
2018 for i in self:
2040 for i in self:
2019 e = self.index.entry_binary(i)
2041 e = self.index.entry_binary(i)
2020 if i == 0 and self._docket is None:
2042 if i == 0 and self._docket is None:
2021 header = self._format_flags | self._format_version
2043 header = self._format_flags | self._format_version
2022 header = self.index.pack_header(header)
2044 header = self.index.pack_header(header)
2023 e = header + e
2045 e = header + e
2024 fp.write(e)
2046 fp.write(e)
2047 if self._docket is not None:
2048 self._docket.index_end = fp.tell()
2025 # the temp file replaces the real index when we exit the context
2049 # the temp file replaces the real index when we exit the context
2026 # manager
2050 # manager
2027
2051
2028 tr.replace(self._indexfile, trindex * self.index.entry_size)
2052 tr.replace(self._indexfile, trindex * self.index.entry_size)
2029 nodemaputil.setup_persistent_nodemap(tr, self)
2053 nodemaputil.setup_persistent_nodemap(tr, self)
2030 self._chunkclear()
2054 self._chunkclear()
2031
2055
2032 if existing_handles:
2056 if existing_handles:
2033 # switched from inline to conventional; reopen the index
2057 # switched from inline to conventional; reopen the index
2034 ifh = self.__index_write_fp()
2058 ifh = self.__index_write_fp()
2035 self._writinghandles = (ifh, new_dfh)
2059 self._writinghandles = (ifh, new_dfh)
2036 new_dfh = None
2060 new_dfh = None
2037 finally:
2061 finally:
2038 if new_dfh is not None:
2062 if new_dfh is not None:
2039 new_dfh.close()
2063 new_dfh.close()
2040
2064
2041 def _nodeduplicatecallback(self, transaction, node):
2065 def _nodeduplicatecallback(self, transaction, node):
2042 """called when trying to add a node already stored."""
2066 """called when trying to add a node already stored."""
2043
2067
2044 @contextlib.contextmanager
2068 @contextlib.contextmanager
2045 def _writing(self, transaction):
2069 def _writing(self, transaction):
2046 if self._writinghandles is not None:
2070 if self._writinghandles is not None:
2047 yield
2071 yield
2048 else:
2072 else:
2049 r = len(self)
2073 r = len(self)
2050 dsize = 0
2074 dsize = 0
2051 if r:
2075 if r:
2052 dsize = self.end(r - 1)
2076 dsize = self.end(r - 1)
2053 dfh = None
2077 dfh = None
2054 if not self._inline:
2078 if not self._inline:
2055 try:
2079 try:
2056 dfh = self._datafp(b"r+")
2080 dfh = self._datafp(b"r+")
2057 dfh.seek(0, os.SEEK_END)
2081 dfh.seek(0, os.SEEK_END)
2058 except IOError as inst:
2082 except IOError as inst:
2059 if inst.errno != errno.ENOENT:
2083 if inst.errno != errno.ENOENT:
2060 raise
2084 raise
2061 dfh = self._datafp(b"w+")
2085 dfh = self._datafp(b"w+")
2062 transaction.add(self._datafile, dsize)
2086 transaction.add(self._datafile, dsize)
2063 try:
2087 try:
2064 isize = r * self.index.entry_size
2088 isize = r * self.index.entry_size
2065 ifh = self.__index_write_fp()
2089 ifh = self.__index_write_fp()
2066 if self._inline:
2090 if self._inline:
2067 transaction.add(self._indexfile, dsize + isize)
2091 transaction.add(self._indexfile, dsize + isize)
2068 else:
2092 else:
2069 transaction.add(self._indexfile, isize)
2093 transaction.add(self._indexfile, isize)
2070 try:
2094 try:
2071 self._writinghandles = (ifh, dfh)
2095 self._writinghandles = (ifh, dfh)
2072 try:
2096 try:
2073 yield
2097 yield
2074 if self._docket is not None:
2098 if self._docket is not None:
2075 self._docket.write(transaction)
2099 self._docket.write(transaction)
2076 finally:
2100 finally:
2077 self._writinghandles = None
2101 self._writinghandles = None
2078 finally:
2102 finally:
2079 ifh.close()
2103 ifh.close()
2080 finally:
2104 finally:
2081 if dfh is not None:
2105 if dfh is not None:
2082 dfh.close()
2106 dfh.close()
2083
2107
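# Illustrative use of the _writing context manager above (a sketch,
# assuming rl, tr and an iterable of _addrevision argument tuples):
# batching inside one context opens the file handles once and writes
# the docket, when there is one, a single time on exit.
def add_many(rl, tr, revisions):
    with rl._writing(tr):
        for args in revisions:
            rl._addrevision(*args)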
2084 def addrevision(
2108 def addrevision(
2085 self,
2109 self,
2086 text,
2110 text,
2087 transaction,
2111 transaction,
2088 link,
2112 link,
2089 p1,
2113 p1,
2090 p2,
2114 p2,
2091 cachedelta=None,
2115 cachedelta=None,
2092 node=None,
2116 node=None,
2093 flags=REVIDX_DEFAULT_FLAGS,
2117 flags=REVIDX_DEFAULT_FLAGS,
2094 deltacomputer=None,
2118 deltacomputer=None,
2095 sidedata=None,
2119 sidedata=None,
2096 ):
2120 ):
2097 """add a revision to the log
2121 """add a revision to the log
2098
2122
2099 text - the revision data to add
2123 text - the revision data to add
2100 transaction - the transaction object used for rollback
2124 transaction - the transaction object used for rollback
2101 link - the linkrev data to add
2125 link - the linkrev data to add
2102 p1, p2 - the parent nodeids of the revision
2126 p1, p2 - the parent nodeids of the revision
2103 cachedelta - an optional precomputed delta
2127 cachedelta - an optional precomputed delta
2104 node - nodeid of revision; typically node is not specified, and it is
2128 node - nodeid of revision; typically node is not specified, and it is
2105 computed by default as hash(text, p1, p2), however subclasses might
2129 computed by default as hash(text, p1, p2), however subclasses might
2106 use a different hashing method (and override checkhash() in that case)
2130 use a different hashing method (and override checkhash() in that case)
2107 flags - the known flags to set on the revision
2131 flags - the known flags to set on the revision
2108 deltacomputer - an optional deltacomputer instance shared between
2132 deltacomputer - an optional deltacomputer instance shared between
2109 multiple calls
2133 multiple calls
2110 """
2134 """
2111 if link == nullrev:
2135 if link == nullrev:
2112 raise error.RevlogError(
2136 raise error.RevlogError(
2113 _(b"attempted to add linkrev -1 to %s") % self.display_id
2137 _(b"attempted to add linkrev -1 to %s") % self.display_id
2114 )
2138 )
2115
2139
2116 if sidedata is None:
2140 if sidedata is None:
2117 sidedata = {}
2141 sidedata = {}
2118 elif sidedata and not self.hassidedata:
2142 elif sidedata and not self.hassidedata:
2119 raise error.ProgrammingError(
2143 raise error.ProgrammingError(
2120 _(b"trying to add sidedata to a revlog who don't support them")
2144 _(b"trying to add sidedata to a revlog who don't support them")
2121 )
2145 )
2122
2146
2123 if flags:
2147 if flags:
2124 node = node or self.hash(text, p1, p2)
2148 node = node or self.hash(text, p1, p2)
2125
2149
2126 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2150 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2127
2151
2128 # If the flag processor modifies the revision data, ignore any provided
2152 # If the flag processor modifies the revision data, ignore any provided
2129 # cachedelta.
2153 # cachedelta.
2130 if rawtext != text:
2154 if rawtext != text:
2131 cachedelta = None
2155 cachedelta = None
2132
2156
2133 if len(rawtext) > _maxentrysize:
2157 if len(rawtext) > _maxentrysize:
2134 raise error.RevlogError(
2158 raise error.RevlogError(
2135 _(
2159 _(
2136 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2160 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2137 )
2161 )
2138 % (self.display_id, len(rawtext))
2162 % (self.display_id, len(rawtext))
2139 )
2163 )
2140
2164
2141 node = node or self.hash(rawtext, p1, p2)
2165 node = node or self.hash(rawtext, p1, p2)
2142 rev = self.index.get_rev(node)
2166 rev = self.index.get_rev(node)
2143 if rev is not None:
2167 if rev is not None:
2144 return rev
2168 return rev
2145
2169
2146 if validatehash:
2170 if validatehash:
2147 self.checkhash(rawtext, node, p1=p1, p2=p2)
2171 self.checkhash(rawtext, node, p1=p1, p2=p2)
2148
2172
2149 return self.addrawrevision(
2173 return self.addrawrevision(
2150 rawtext,
2174 rawtext,
2151 transaction,
2175 transaction,
2152 link,
2176 link,
2153 p1,
2177 p1,
2154 p2,
2178 p2,
2155 node,
2179 node,
2156 flags,
2180 flags,
2157 cachedelta=cachedelta,
2181 cachedelta=cachedelta,
2158 deltacomputer=deltacomputer,
2182 deltacomputer=deltacomputer,
2159 sidedata=sidedata,
2183 sidedata=sidedata,
2160 )
2184 )
2161
2185
2162 def addrawrevision(
2186 def addrawrevision(
2163 self,
2187 self,
2164 rawtext,
2188 rawtext,
2165 transaction,
2189 transaction,
2166 link,
2190 link,
2167 p1,
2191 p1,
2168 p2,
2192 p2,
2169 node,
2193 node,
2170 flags,
2194 flags,
2171 cachedelta=None,
2195 cachedelta=None,
2172 deltacomputer=None,
2196 deltacomputer=None,
2173 sidedata=None,
2197 sidedata=None,
2174 ):
2198 ):
2175 """add a raw revision with known flags, node and parents
2199 """add a raw revision with known flags, node and parents
2176 useful when reusing a revision not stored in this revlog (e.g. received
2200 useful when reusing a revision not stored in this revlog (e.g. received
2177 over the wire, or read from an external bundle).
2201 over the wire, or read from an external bundle).
2178 """
2202 """
2179 with self._writing(transaction):
2203 with self._writing(transaction):
2180 return self._addrevision(
2204 return self._addrevision(
2181 node,
2205 node,
2182 rawtext,
2206 rawtext,
2183 transaction,
2207 transaction,
2184 link,
2208 link,
2185 p1,
2209 p1,
2186 p2,
2210 p2,
2187 flags,
2211 flags,
2188 cachedelta,
2212 cachedelta,
2189 deltacomputer=deltacomputer,
2213 deltacomputer=deltacomputer,
2190 sidedata=sidedata,
2214 sidedata=sidedata,
2191 )
2215 )
2192
2216
2193 def compress(self, data):
2217 def compress(self, data):
2194 """Generate a possibly-compressed representation of data."""
2218 """Generate a possibly-compressed representation of data."""
2195 if not data:
2219 if not data:
2196 return b'', data
2220 return b'', data
2197
2221
2198 compressed = self._compressor.compress(data)
2222 compressed = self._compressor.compress(data)
2199
2223
2200 if compressed:
2224 if compressed:
2201 # The revlog compressor added the header in the returned data.
2225 # The revlog compressor added the header in the returned data.
2202 return b'', compressed
2226 return b'', compressed
2203
2227
2204 if data[0:1] == b'\0':
2228 if data[0:1] == b'\0':
2205 return b'', data
2229 return b'', data
2206 return b'u', data
2230 return b'u', data
2207
2231
2208 def decompress(self, data):
2232 def decompress(self, data):
2209 """Decompress a revlog chunk.
2233 """Decompress a revlog chunk.
2210
2234
2211 The chunk is expected to begin with a header identifying the
2235 The chunk is expected to begin with a header identifying the
2212 format type so it can be routed to an appropriate decompressor.
2236 format type so it can be routed to an appropriate decompressor.
2213 """
2237 """
2214 if not data:
2238 if not data:
2215 return data
2239 return data
2216
2240
2217 # Revlogs are read much more frequently than they are written and many
2241 # Revlogs are read much more frequently than they are written and many
2218 # chunks only take microseconds to decompress, so performance is
2242 # chunks only take microseconds to decompress, so performance is
2219 # important here.
2243 # important here.
2220 #
2244 #
2221 # We can make a few assumptions about revlogs:
2245 # We can make a few assumptions about revlogs:
2222 #
2246 #
2223 # 1) the majority of chunks will be compressed (as opposed to inline
2247 # 1) the majority of chunks will be compressed (as opposed to inline
2224 # raw data).
2248 # raw data).
2225 # 2) decompressing *any* data will likely be at least 10x slower than
2249 # 2) decompressing *any* data will likely be at least 10x slower than
2226 # returning raw inline data.
2250 # returning raw inline data.
2227 # 3) we want to prioritize common and officially supported compression
2251 # 3) we want to prioritize common and officially supported compression
2228 # engines
2252 # engines
2229 #
2253 #
2230 # It follows that we want to optimize for "decompress compressed data
2254 # It follows that we want to optimize for "decompress compressed data
2231 # when encoded with common and officially supported compression engines"
2255 # when encoded with common and officially supported compression engines"
2232 # case over "raw data" and "data encoded by less common or non-official
2256 # case over "raw data" and "data encoded by less common or non-official
2233 # compression engines." That is why we have the inline lookup first
2257 # compression engines." That is why we have the inline lookup first
2234 # followed by the compengines lookup.
2258 # followed by the compengines lookup.
2235 #
2259 #
2236 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2260 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2237 # compressed chunks. And this matters for changelog and manifest reads.
2261 # compressed chunks. And this matters for changelog and manifest reads.
2238 t = data[0:1]
2262 t = data[0:1]
2239
2263
2240 if t == b'x':
2264 if t == b'x':
2241 try:
2265 try:
2242 return _zlibdecompress(data)
2266 return _zlibdecompress(data)
2243 except zlib.error as e:
2267 except zlib.error as e:
2244 raise error.RevlogError(
2268 raise error.RevlogError(
2245 _(b'revlog decompress error: %s')
2269 _(b'revlog decompress error: %s')
2246 % stringutil.forcebytestr(e)
2270 % stringutil.forcebytestr(e)
2247 )
2271 )
2248 # '\0' is more common than 'u' so it goes first.
2272 # '\0' is more common than 'u' so it goes first.
2249 elif t == b'\0':
2273 elif t == b'\0':
2250 return data
2274 return data
2251 elif t == b'u':
2275 elif t == b'u':
2252 return util.buffer(data, 1)
2276 return util.buffer(data, 1)
2253
2277
2254 try:
2278 try:
2255 compressor = self._decompressors[t]
2279 compressor = self._decompressors[t]
2256 except KeyError:
2280 except KeyError:
2257 try:
2281 try:
2258 engine = util.compengines.forrevlogheader(t)
2282 engine = util.compengines.forrevlogheader(t)
2259 compressor = engine.revlogcompressor(self._compengineopts)
2283 compressor = engine.revlogcompressor(self._compengineopts)
2260 self._decompressors[t] = compressor
2284 self._decompressors[t] = compressor
2261 except KeyError:
2285 except KeyError:
2262 raise error.RevlogError(
2286 raise error.RevlogError(
2263 _(b'unknown compression type %s') % binascii.hexlify(t)
2287 _(b'unknown compression type %s') % binascii.hexlify(t)
2264 )
2288 )
2265
2289
2266 return compressor.decompress(data)
2290 return compressor.decompress(data)
2267
2291
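# The header dispatch above, reduced to its core (a sketch; the real
# code also consults util.compengines for other header bytes):
import zlib

def decompress_sketch(data):
    if not data:
        return data
    t = data[0:1]
    if t == b'x':        # a zlib stream; the marker is part of the stream
        return zlib.decompress(data)
    if t == b'\0':       # raw data that happens to start with NUL
        return data
    if t == b'u':        # explicitly-marked uncompressed data
        return data[1:]
    raise ValueError('unknown compression type %r' % t)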
2268 def _addrevision(
2292 def _addrevision(
2269 self,
2293 self,
2270 node,
2294 node,
2271 rawtext,
2295 rawtext,
2272 transaction,
2296 transaction,
2273 link,
2297 link,
2274 p1,
2298 p1,
2275 p2,
2299 p2,
2276 flags,
2300 flags,
2277 cachedelta,
2301 cachedelta,
2278 alwayscache=False,
2302 alwayscache=False,
2279 deltacomputer=None,
2303 deltacomputer=None,
2280 sidedata=None,
2304 sidedata=None,
2281 ):
2305 ):
2282 """internal function to add revisions to the log
2306 """internal function to add revisions to the log
2283
2307
2284 see addrevision for argument descriptions.
2308 see addrevision for argument descriptions.
2285
2309
2286 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2310 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2287
2311
2288 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2312 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2289 be used.
2313 be used.
2290
2314
2291 invariants:
2315 invariants:
2292 - rawtext is optional (can be None); if not set, cachedelta must be set.
2316 - rawtext is optional (can be None); if not set, cachedelta must be set.
2293 If both are set, they must correspond to each other.
2317 If both are set, they must correspond to each other.
2294 """
2318 """
2295 if node == self.nullid:
2319 if node == self.nullid:
2296 raise error.RevlogError(
2320 raise error.RevlogError(
2297 _(b"%s: attempt to add null revision") % self.display_id
2321 _(b"%s: attempt to add null revision") % self.display_id
2298 )
2322 )
2299 if (
2323 if (
2300 node == self.nodeconstants.wdirid
2324 node == self.nodeconstants.wdirid
2301 or node in self.nodeconstants.wdirfilenodeids
2325 or node in self.nodeconstants.wdirfilenodeids
2302 ):
2326 ):
2303 raise error.RevlogError(
2327 raise error.RevlogError(
2304 _(b"%s: attempt to add wdir revision") % self.display_id
2328 _(b"%s: attempt to add wdir revision") % self.display_id
2305 )
2329 )
2306 if self._writinghandles is None:
2330 if self._writinghandles is None:
2307 msg = b'adding revision outside `revlog._writing` context'
2331 msg = b'adding revision outside `revlog._writing` context'
2308 raise error.ProgrammingError(msg)
2332 raise error.ProgrammingError(msg)
2309
2333
2310 if self._inline:
2334 if self._inline:
2311 fh = self._writinghandles[0]
2335 fh = self._writinghandles[0]
2312 else:
2336 else:
2313 fh = self._writinghandles[1]
2337 fh = self._writinghandles[1]
2314
2338
2315 btext = [rawtext]
2339 btext = [rawtext]
2316
2340
2317 curr = len(self)
2341 curr = len(self)
2318 prev = curr - 1
2342 prev = curr - 1
2319
2343
2320 offset = self._get_data_offset(prev)
2344 offset = self._get_data_offset(prev)
2321
2345
2322 if self._concurrencychecker:
2346 if self._concurrencychecker:
2323 ifh, dfh = self._writinghandles
2347 ifh, dfh = self._writinghandles
2324 if self._inline:
2348 if self._inline:
2325 # offset is "as if" it were in the .d file, so we need to add on
2349 # offset is "as if" it were in the .d file, so we need to add on
2326 # the size of the entry metadata.
2350 # the size of the entry metadata.
2327 self._concurrencychecker(
2351 self._concurrencychecker(
2328 ifh, self._indexfile, offset + curr * self.index.entry_size
2352 ifh, self._indexfile, offset + curr * self.index.entry_size
2329 )
2353 )
2330 else:
2354 else:
2331 # Entries in the .i are a consistent size.
2355 # Entries in the .i are a consistent size.
2332 self._concurrencychecker(
2356 self._concurrencychecker(
2333 ifh, self._indexfile, curr * self.index.entry_size
2357 ifh, self._indexfile, curr * self.index.entry_size
2334 )
2358 )
2335 self._concurrencychecker(dfh, self._datafile, offset)
2359 self._concurrencychecker(dfh, self._datafile, offset)
2336
2360
2337 p1r, p2r = self.rev(p1), self.rev(p2)
2361 p1r, p2r = self.rev(p1), self.rev(p2)
2338
2362
2339 # full versions are inserted when the needed deltas
2363 # full versions are inserted when the needed deltas
2340 # become comparable to the uncompressed text
2364 # become comparable to the uncompressed text
2341 if rawtext is None:
2365 if rawtext is None:
2342 # need rawtext size, before changed by flag processors, which is
2366 # need rawtext size, before changed by flag processors, which is
2343 # the non-raw size. use revlog explicitly to avoid filelog's extra
2367 # the non-raw size. use revlog explicitly to avoid filelog's extra
2344 # logic that might remove metadata size.
2368 # logic that might remove metadata size.
2345 textlen = mdiff.patchedsize(
2369 textlen = mdiff.patchedsize(
2346 revlog.size(self, cachedelta[0]), cachedelta[1]
2370 revlog.size(self, cachedelta[0]), cachedelta[1]
2347 )
2371 )
2348 else:
2372 else:
2349 textlen = len(rawtext)
2373 textlen = len(rawtext)
2350
2374
2351 if deltacomputer is None:
2375 if deltacomputer is None:
2352 deltacomputer = deltautil.deltacomputer(self)
2376 deltacomputer = deltautil.deltacomputer(self)
2353
2377
2354 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2378 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2355
2379
2356 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2380 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2357
2381
2358 if sidedata and self.hassidedata:
2382 if sidedata and self.hassidedata:
2359 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2383 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2360 sidedata_offset = offset + deltainfo.deltalen
2384 sidedata_offset = offset + deltainfo.deltalen
2361 else:
2385 else:
2362 serialized_sidedata = b""
2386 serialized_sidedata = b""
2363 # Don't store the offset if the sidedata is empty, that way
2387 # Don't store the offset if the sidedata is empty, that way
2364 # we can easily detect empty sidedata, and it will be no different
2388 # we can easily detect empty sidedata, and it will be no different
2365 # from sidedata we add manually.
2389 # from sidedata we add manually.
2366 sidedata_offset = 0
2390 sidedata_offset = 0
2367
2391
2368 e = (
2392 e = (
2369 offset_type(offset, flags),
2393 offset_type(offset, flags),
2370 deltainfo.deltalen,
2394 deltainfo.deltalen,
2371 textlen,
2395 textlen,
2372 deltainfo.base,
2396 deltainfo.base,
2373 link,
2397 link,
2374 p1r,
2398 p1r,
2375 p2r,
2399 p2r,
2376 node,
2400 node,
2377 sidedata_offset,
2401 sidedata_offset,
2378 len(serialized_sidedata),
2402 len(serialized_sidedata),
2379 )
2403 )
2380
2404
2381 self.index.append(e)
2405 self.index.append(e)
2382 entry = self.index.entry_binary(curr)
2406 entry = self.index.entry_binary(curr)
2383 if curr == 0 and self._docket is None:
2407 if curr == 0 and self._docket is None:
2384 header = self._format_flags | self._format_version
2408 header = self._format_flags | self._format_version
2385 header = self.index.pack_header(header)
2409 header = self.index.pack_header(header)
2386 entry = header + entry
2410 entry = header + entry
2387 self._writeentry(
2411 self._writeentry(
2388 transaction,
2412 transaction,
2389 entry,
2413 entry,
2390 deltainfo.data,
2414 deltainfo.data,
2391 link,
2415 link,
2392 offset,
2416 offset,
2393 serialized_sidedata,
2417 serialized_sidedata,
2394 )
2418 )
2395
2419
2396 rawtext = btext[0]
2420 rawtext = btext[0]
2397
2421
2398 if alwayscache and rawtext is None:
2422 if alwayscache and rawtext is None:
2399 rawtext = deltacomputer.buildtext(revinfo, fh)
2423 rawtext = deltacomputer.buildtext(revinfo, fh)
2400
2424
2401 if type(rawtext) == bytes: # only accept immutable objects
2425 if type(rawtext) == bytes: # only accept immutable objects
2402 self._revisioncache = (node, curr, rawtext)
2426 self._revisioncache = (node, curr, rawtext)
2403 self._chainbasecache[curr] = deltainfo.chainbase
2427 self._chainbasecache[curr] = deltainfo.chainbase
2404 return curr
2428 return curr
2405
2429
2406 def _get_data_offset(self, prev):
2430 def _get_data_offset(self, prev):
2407 """Returns the current offset in the (in-transaction) data file.
2431 """Returns the current offset in the (in-transaction) data file.
2408 Versions < 2 of the revlog can get this in O(1); revlog v2 needs a docket
2432 Versions < 2 of the revlog can get this in O(1); revlog v2 needs a docket
2409 file to store that information: since sidedata can be rewritten to the
2433 file to store that information: since sidedata can be rewritten to the
2410 end of the data file within a transaction, you can have cases where, for
2434 end of the data file within a transaction, you can have cases where, for
2411 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2435 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2412 to `n - 1`'s sidedata being written after `n`'s data.
2436 to `n - 1`'s sidedata being written after `n`'s data.
2413
2437
2414 TODO cache this in a docket file before getting out of experimental."""
2438 TODO cache this in a docket file before getting out of experimental."""
2415 if self._format_version != REVLOGV2:
2439 if self._format_version != REVLOGV2:
2416 return self.end(prev)
2440 return self.end(prev)
2417
2441
2418 offset = 0
2442 offset = 0
2419 for rev, entry in enumerate(self.index):
2443 for rev, entry in enumerate(self.index):
2420 sidedata_end = entry[8] + entry[9]
2444 sidedata_end = entry[8] + entry[9]
2421 # Sidedata for a previous rev has potentially been written after
2445 # Sidedata for a previous rev has potentially been written after
2422 # this rev's end, so take the max.
2446 # this rev's end, so take the max.
2423 offset = max(self.end(rev), offset, sidedata_end)
2447 offset = max(self.end(rev), offset, sidedata_end)
2424 return offset
2448 return offset
2425
2449
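# Why the scan above takes a max() (sketch): with revlog-v2, sidedata
# can be appended out of revision order, so the real end of the data
# file is the max over every revision's data end and sidedata end.
def data_end(index, end):
    offset = 0
    for rev, entry in enumerate(index):
        sidedata_end = entry[8] + entry[9]  # sidedata offset + size
        offset = max(offset, end(rev), sidedata_end)
    return offset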
2426 def _writeentry(self, transaction, entry, data, link, offset, sidedata):
2450 def _writeentry(self, transaction, entry, data, link, offset, sidedata):
2427 # Files opened in a+ mode have inconsistent behavior on various
2451 # Files opened in a+ mode have inconsistent behavior on various
2428 # platforms. Windows requires that a file positioning call be made
2452 # platforms. Windows requires that a file positioning call be made
2429 # when the file handle transitions between reads and writes. See
2453 # when the file handle transitions between reads and writes. See
2430 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2454 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2431 # platforms, Python or the platform itself can be buggy. Some versions
2455 # platforms, Python or the platform itself can be buggy. Some versions
2432 # of Solaris have been observed to not append at the end of the file
2456 # of Solaris have been observed to not append at the end of the file
2433 # if the file was seeked to before the end. See issue4943 for more.
2457 # if the file was seeked to before the end. See issue4943 for more.
2434 #
2458 #
2435 # We work around this issue by inserting a seek() before writing.
2459 # We work around this issue by inserting a seek() before writing.
2436 # Note: This is likely not necessary on Python 3. However, because
2460 # Note: This is likely not necessary on Python 3. However, because
2437 # the file handle is reused for reads and may be seeked there, we need
2461 # the file handle is reused for reads and may be seeked there, we need
2438 # to be careful before changing this.
2462 # to be careful before changing this.
2439 if self._writinghandles is None:
2463 if self._writinghandles is None:
2440 msg = b'adding revision outside `revlog._writing` context'
2464 msg = b'adding revision outside `revlog._writing` context'
2441 raise error.ProgrammingError(msg)
2465 raise error.ProgrammingError(msg)
2442 ifh, dfh = self._writinghandles
2466 ifh, dfh = self._writinghandles
2467 if self._docket is None:
2443 ifh.seek(0, os.SEEK_END)
2468 ifh.seek(0, os.SEEK_END)
2469 else:
2470 ifh.seek(self._docket.index_end, os.SEEK_SET)
2444 if dfh:
2471 if dfh:
2445 dfh.seek(0, os.SEEK_END)
2472 dfh.seek(0, os.SEEK_END)
2446
2473
2447 curr = len(self) - 1
2474 curr = len(self) - 1
2448 if not self._inline:
2475 if not self._inline:
2449 transaction.add(self._datafile, offset)
2476 transaction.add(self._datafile, offset)
2450 transaction.add(self._indexfile, curr * len(entry))
2477 transaction.add(self._indexfile, curr * len(entry))
2451 if data[0]:
2478 if data[0]:
2452 dfh.write(data[0])
2479 dfh.write(data[0])
2453 dfh.write(data[1])
2480 dfh.write(data[1])
2454 if sidedata:
2481 if sidedata:
2455 dfh.write(sidedata)
2482 dfh.write(sidedata)
2456 ifh.write(entry)
2483 ifh.write(entry)
2457 else:
2484 else:
2458 offset += curr * self.index.entry_size
2485 offset += curr * self.index.entry_size
2459 transaction.add(self._indexfile, offset)
2486 transaction.add(self._indexfile, offset)
2460 ifh.write(entry)
2487 ifh.write(entry)
2461 ifh.write(data[0])
2488 ifh.write(data[0])
2462 ifh.write(data[1])
2489 ifh.write(data[1])
2463 if sidedata:
2490 if sidedata:
2464 ifh.write(sidedata)
2491 ifh.write(sidedata)
2465 self._enforceinlinesize(transaction)
2492 self._enforceinlinesize(transaction)
2493 if self._docket is not None:
2494 self._docket.index_end = self._writinghandles[0].tell()
2495
2466 nodemaputil.setup_persistent_nodemap(transaction, self)
2496 nodemaputil.setup_persistent_nodemap(transaction, self)
2467
2497
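# A sketch of the docket bookkeeping introduced by this change (the
# docket's shape is assumed): with a docket, index writes resume at the
# recorded index_end rather than the physical end of file, and the new
# end is recorded after each write so the docket always names a
# consistent index size.
import os

def write_index_entry(ifh, entry, docket=None):
    if docket is None:
        ifh.seek(0, os.SEEK_END)       # legacy: append at end of file
    else:
        ifh.seek(docket.index_end)     # resume at the tracked index size
    ifh.write(entry)
    if docket is not None:
        docket.index_end = ifh.tell()  # publish the new valid index end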
2468 def addgroup(
2498 def addgroup(
2469 self,
2499 self,
2470 deltas,
2500 deltas,
2471 linkmapper,
2501 linkmapper,
2472 transaction,
2502 transaction,
2473 alwayscache=False,
2503 alwayscache=False,
2474 addrevisioncb=None,
2504 addrevisioncb=None,
2475 duplicaterevisioncb=None,
2505 duplicaterevisioncb=None,
2476 ):
2506 ):
2477 """
2507 """
2478 add a delta group
2508 add a delta group
2479
2509
2480 Given a set of deltas, add them to the revision log. The
2510 Given a set of deltas, add them to the revision log. The
2481 first delta is against its parent, which should be in our
2511 first delta is against its parent, which should be in our
2482 log; the rest are against the previous delta.
2512 log; the rest are against the previous delta.
2483
2513
2484 If ``addrevisioncb`` is defined, it will be called with arguments of
2514 If ``addrevisioncb`` is defined, it will be called with arguments of
2485 this revlog and the node that was added.
2515 this revlog and the node that was added.
2486 """
2516 """
2487
2517
2488 if self._adding_group:
2518 if self._adding_group:
2489 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2519 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2490
2520
2491 self._adding_group = True
2521 self._adding_group = True
2492 empty = True
2522 empty = True
2493 try:
2523 try:
2494 with self._writing(transaction):
2524 with self._writing(transaction):
2495 deltacomputer = deltautil.deltacomputer(self)
2525 deltacomputer = deltautil.deltacomputer(self)
2496 # loop through our set of deltas
2526 # loop through our set of deltas
2497 for data in deltas:
2527 for data in deltas:
2498 (
2528 (
2499 node,
2529 node,
2500 p1,
2530 p1,
2501 p2,
2531 p2,
2502 linknode,
2532 linknode,
2503 deltabase,
2533 deltabase,
2504 delta,
2534 delta,
2505 flags,
2535 flags,
2506 sidedata,
2536 sidedata,
2507 ) = data
2537 ) = data
2508 link = linkmapper(linknode)
2538 link = linkmapper(linknode)
2509 flags = flags or REVIDX_DEFAULT_FLAGS
2539 flags = flags or REVIDX_DEFAULT_FLAGS
2510
2540
2511 rev = self.index.get_rev(node)
2541 rev = self.index.get_rev(node)
2512 if rev is not None:
2542 if rev is not None:
2513 # this can happen if two branches make the same change
2543 # this can happen if two branches make the same change
2514 self._nodeduplicatecallback(transaction, rev)
2544 self._nodeduplicatecallback(transaction, rev)
2515 if duplicaterevisioncb:
2545 if duplicaterevisioncb:
2516 duplicaterevisioncb(self, rev)
2546 duplicaterevisioncb(self, rev)
2517 empty = False
2547 empty = False
2518 continue
2548 continue
2519
2549
2520 for p in (p1, p2):
2550 for p in (p1, p2):
2521 if not self.index.has_node(p):
2551 if not self.index.has_node(p):
2522 raise error.LookupError(
2552 raise error.LookupError(
2523 p, self.radix, _(b'unknown parent')
2553 p, self.radix, _(b'unknown parent')
2524 )
2554 )
2525
2555
2526 if not self.index.has_node(deltabase):
2556 if not self.index.has_node(deltabase):
2527 raise error.LookupError(
2557 raise error.LookupError(
2528 deltabase, self.display_id, _(b'unknown delta base')
2558 deltabase, self.display_id, _(b'unknown delta base')
2529 )
2559 )
2530
2560
2531 baserev = self.rev(deltabase)
2561 baserev = self.rev(deltabase)
2532
2562
2533 if baserev != nullrev and self.iscensored(baserev):
2563 if baserev != nullrev and self.iscensored(baserev):
2534 # if base is censored, delta must be full replacement in a
2564 # if base is censored, delta must be full replacement in a
2535 # single patch operation
2565 # single patch operation
2536 hlen = struct.calcsize(b">lll")
2566 hlen = struct.calcsize(b">lll")
2537 oldlen = self.rawsize(baserev)
2567 oldlen = self.rawsize(baserev)
2538 newlen = len(delta) - hlen
2568 newlen = len(delta) - hlen
2539 if delta[:hlen] != mdiff.replacediffheader(
2569 if delta[:hlen] != mdiff.replacediffheader(
2540 oldlen, newlen
2570 oldlen, newlen
2541 ):
2571 ):
2542 raise error.CensoredBaseError(
2572 raise error.CensoredBaseError(
2543 self.display_id, self.node(baserev)
2573 self.display_id, self.node(baserev)
2544 )
2574 )
2545
2575
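When the delta base is censored, the incoming delta must be one full-replacement hunk, and the code above compares its first 12 bytes against ``mdiff.replacediffheader(oldlen, newlen)``. A minimal sketch of that header, assuming it packs a single big-endian hunk covering the whole old text:

    import struct

    def replacediffheader(oldlen, newlen):
        # one hunk replacing bytes [0, oldlen) with ``newlen`` new bytes
        return struct.pack(b'>lll', 0, oldlen, newlen)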
2546 if not flags and self._peek_iscensored(baserev, delta):
2576 if not flags and self._peek_iscensored(baserev, delta):
2547 flags |= REVIDX_ISCENSORED
2577 flags |= REVIDX_ISCENSORED
2548
2578
2549 # We assume consumers of addrevisioncb will want to retrieve
2579 # We assume consumers of addrevisioncb will want to retrieve
2550 # the added revision, which will require a call to
2580 # the added revision, which will require a call to
2551 # revision(). revision() will fast path if there is a cache
2581 # revision(). revision() will fast path if there is a cache
2552 # hit. So, we tell _addrevision() to always cache in this case.
2582 # hit. So, we tell _addrevision() to always cache in this case.
2553 # We're only using addgroup() in the context of changegroup
2583 # We're only using addgroup() in the context of changegroup
2554 # generation so the revision data can always be handled as raw
2584 # generation so the revision data can always be handled as raw
2555 # by the flagprocessor.
2585 # by the flagprocessor.
2556 rev = self._addrevision(
2586 rev = self._addrevision(
2557 node,
2587 node,
2558 None,
2588 None,
2559 transaction,
2589 transaction,
2560 link,
2590 link,
2561 p1,
2591 p1,
2562 p2,
2592 p2,
2563 flags,
2593 flags,
2564 (baserev, delta),
2594 (baserev, delta),
2565 alwayscache=alwayscache,
2595 alwayscache=alwayscache,
2566 deltacomputer=deltacomputer,
2596 deltacomputer=deltacomputer,
2567 sidedata=sidedata,
2597 sidedata=sidedata,
2568 )
2598 )
2569
2599
2570 if addrevisioncb:
2600 if addrevisioncb:
2571 addrevisioncb(self, rev)
2601 addrevisioncb(self, rev)
2572 empty = False
2602 empty = False
2573 finally:
2603 finally:
2574 self._adding_group = False
2604 self._adding_group = False
2575 return not empty
2605 return not empty
2576
2606
2577 def iscensored(self, rev):
2607 def iscensored(self, rev):
2578 """Check if a file revision is censored."""
2608 """Check if a file revision is censored."""
2579 if not self._censorable:
2609 if not self._censorable:
2580 return False
2610 return False
2581
2611
2582 return self.flags(rev) & REVIDX_ISCENSORED
2612 return self.flags(rev) & REVIDX_ISCENSORED
2583
2613
2584 def _peek_iscensored(self, baserev, delta):
2614 def _peek_iscensored(self, baserev, delta):
2585 """Quickly check if a delta produces a censored revision."""
2615 """Quickly check if a delta produces a censored revision."""
2586 if not self._censorable:
2616 if not self._censorable:
2587 return False
2617 return False
2588
2618
2589 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2619 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2590
2620
2591 def getstrippoint(self, minlink):
2621 def getstrippoint(self, minlink):
2592 """find the minimum rev that must be stripped to strip the linkrev
2622 """find the minimum rev that must be stripped to strip the linkrev
2593
2623
2594 Returns a tuple containing the minimum rev and a set of all revs that
2624 Returns a tuple containing the minimum rev and a set of all revs that
2595 have linkrevs that will be broken by this strip.
2625 have linkrevs that will be broken by this strip.
2596 """
2626 """
2597 return storageutil.resolvestripinfo(
2627 return storageutil.resolvestripinfo(
2598 minlink,
2628 minlink,
2599 len(self) - 1,
2629 len(self) - 1,
2600 self.headrevs(),
2630 self.headrevs(),
2601 self.linkrev,
2631 self.linkrev,
2602 self.parentrevs,
2632 self.parentrevs,
2603 )
2633 )
2604
2634
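A hypothetical caller, for illustration (``rl`` stands in for any revlog instance):

    # find what must be stripped so that no linkrev >= 5 remains;
    # ``broken`` holds revisions whose linkrevs the strip would invalidate
    rev, broken = rl.getstrippoint(5)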
2605 def strip(self, minlink, transaction):
2635 def strip(self, minlink, transaction):
2606 """truncate the revlog on the first revision with a linkrev >= minlink
2636 """truncate the revlog on the first revision with a linkrev >= minlink
2607
2637
2608 This function is called when we're stripping revision minlink and
2638 This function is called when we're stripping revision minlink and
2609 its descendants from the repository.
2639 its descendants from the repository.
2610
2640
2611 We have to remove all revisions with linkrev >= minlink, because
2641 We have to remove all revisions with linkrev >= minlink, because
2612 the equivalent changelog revisions will be renumbered after the
2642 the equivalent changelog revisions will be renumbered after the
2613 strip.
2643 strip.
2614
2644
2615 So we truncate the revlog on the first of these revisions, and
2645 So we truncate the revlog on the first of these revisions, and
2616 trust that the caller has saved the revisions that shouldn't be
2646 trust that the caller has saved the revisions that shouldn't be
2617 removed and that it'll re-add them after this truncation.
2647 removed and that it'll re-add them after this truncation.
2618 """
2648 """
2619 if len(self) == 0:
2649 if len(self) == 0:
2620 return
2650 return
2621
2651
2622 rev, _ = self.getstrippoint(minlink)
2652 rev, _ = self.getstrippoint(minlink)
2623 if rev == len(self):
2653 if rev == len(self):
2624 return
2654 return
2625
2655
2626 # first truncate the files on disk
2656 # first truncate the files on disk
2627 end = self.start(rev)
2657 end = self.start(rev)
2628 if not self._inline:
2658 if not self._inline:
2629 transaction.add(self._datafile, end)
2659 transaction.add(self._datafile, end)
2630 end = rev * self.index.entry_size
2660 end = rev * self.index.entry_size
2631 else:
2661 else:
2632 end += rev * self.index.entry_size
2662 end += rev * self.index.entry_size
2633
2663
2634 transaction.add(self._indexfile, end)
2664 transaction.add(self._indexfile, end)
2665 if self._docket is not None:
2666 # XXX we could leverage the docket while stripping. However it is
2667 # not powerful enough at the time of this comment
2668 self._docket.index_end = end
2669 self._docket.write(transaction, stripping=True)
2635
2670
2636 # then reset internal state in memory to forget those revisions
2671 # then reset internal state in memory to forget those revisions
2637 self._revisioncache = None
2672 self._revisioncache = None
2638 self._chaininfocache = util.lrucachedict(500)
2673 self._chaininfocache = util.lrucachedict(500)
2639 self._chunkclear()
2674 self._chunkclear()
2640
2675
2641 del self.index[rev:-1]
2676 del self.index[rev:-1]
2642
2677
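Condensed, the truncation offsets computed above are (schematic names, restating the branch in the code):

    # non-inline: index (.i) and data (.d) files are truncated separately
    data_end = start(rev)                    # first byte of rev's data
    index_end = rev * entry_size             # index entries are fixed width
    # inline: one file interleaves index entries and data, so both terms add up
    inline_end = start(rev) + rev * entry_size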
2643 def checksize(self):
2678 def checksize(self):
2644 """Check size of index and data files
2679 """Check size of index and data files
2645
2680
2646 return a (dd, di) tuple.
2681 return a (dd, di) tuple.
2647 - dd: extra bytes for the "data" file
2682 - dd: extra bytes for the "data" file
2648 - di: extra bytes for the "index" file
2683 - di: extra bytes for the "index" file
2649
2684
2650 A healthy revlog will return (0, 0).
2685 A healthy revlog will return (0, 0).
2651 """
2686 """
2652 expected = 0
2687 expected = 0
2653 if len(self):
2688 if len(self):
2654 expected = max(0, self.end(len(self) - 1))
2689 expected = max(0, self.end(len(self) - 1))
2655
2690
2656 try:
2691 try:
2657 with self._datafp() as f:
2692 with self._datafp() as f:
2658 f.seek(0, io.SEEK_END)
2693 f.seek(0, io.SEEK_END)
2659 actual = f.tell()
2694 actual = f.tell()
2660 dd = actual - expected
2695 dd = actual - expected
2661 except IOError as inst:
2696 except IOError as inst:
2662 if inst.errno != errno.ENOENT:
2697 if inst.errno != errno.ENOENT:
2663 raise
2698 raise
2664 dd = 0
2699 dd = 0
2665
2700
2666 try:
2701 try:
2667 f = self.opener(self._indexfile)
2702 f = self.opener(self._indexfile)
2668 f.seek(0, io.SEEK_END)
2703 f.seek(0, io.SEEK_END)
2669 actual = f.tell()
2704 actual = f.tell()
2670 f.close()
2705 f.close()
2671 s = self.index.entry_size
2706 s = self.index.entry_size
2672 i = max(0, actual // s)
2707 i = max(0, actual // s)
2673 di = actual - (i * s)
2708 di = actual - (i * s)
2674 if self._inline:
2709 if self._inline:
2675 databytes = 0
2710 databytes = 0
2676 for r in self:
2711 for r in self:
2677 databytes += max(0, self.length(r))
2712 databytes += max(0, self.length(r))
2678 dd = 0
2713 dd = 0
2679 di = actual - len(self) * s - databytes
2714 di = actual - len(self) * s - databytes
2680 except IOError as inst:
2715 except IOError as inst:
2681 if inst.errno != errno.ENOENT:
2716 if inst.errno != errno.ENOENT:
2682 raise
2717 raise
2683 di = 0
2718 di = 0
2684
2719
2685 return (dd, di)
2720 return (dd, di)
2686
2721
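A hypothetical integrity probe built on the return value (``rl`` is any revlog instance):

    dd, di = rl.checksize()
    if (dd, di) != (0, 0):
        # stray trailing bytes usually indicate an interrupted write
        print('revlog has %d stray data bytes, %d stray index bytes' % (dd, di))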
2687 def files(self):
2722 def files(self):
2688 res = [self._indexfile]
2723 res = [self._indexfile]
2689 if not self._inline:
2724 if not self._inline:
2690 res.append(self._datafile)
2725 res.append(self._datafile)
2691 return res
2726 return res
2692
2727
2693 def emitrevisions(
2728 def emitrevisions(
2694 self,
2729 self,
2695 nodes,
2730 nodes,
2696 nodesorder=None,
2731 nodesorder=None,
2697 revisiondata=False,
2732 revisiondata=False,
2698 assumehaveparentrevisions=False,
2733 assumehaveparentrevisions=False,
2699 deltamode=repository.CG_DELTAMODE_STD,
2734 deltamode=repository.CG_DELTAMODE_STD,
2700 sidedata_helpers=None,
2735 sidedata_helpers=None,
2701 ):
2736 ):
2702 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2737 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2703 raise error.ProgrammingError(
2738 raise error.ProgrammingError(
2704 b'unhandled value for nodesorder: %s' % nodesorder
2739 b'unhandled value for nodesorder: %s' % nodesorder
2705 )
2740 )
2706
2741
2707 if nodesorder is None and not self._generaldelta:
2742 if nodesorder is None and not self._generaldelta:
2708 nodesorder = b'storage'
2743 nodesorder = b'storage'
2709
2744
2710 if (
2745 if (
2711 not self._storedeltachains
2746 not self._storedeltachains
2712 and deltamode != repository.CG_DELTAMODE_PREV
2747 and deltamode != repository.CG_DELTAMODE_PREV
2713 ):
2748 ):
2714 deltamode = repository.CG_DELTAMODE_FULL
2749 deltamode = repository.CG_DELTAMODE_FULL
2715
2750
2716 return storageutil.emitrevisions(
2751 return storageutil.emitrevisions(
2717 self,
2752 self,
2718 nodes,
2753 nodes,
2719 nodesorder,
2754 nodesorder,
2720 revlogrevisiondelta,
2755 revlogrevisiondelta,
2721 deltaparentfn=self.deltaparent,
2756 deltaparentfn=self.deltaparent,
2722 candeltafn=self.candelta,
2757 candeltafn=self.candelta,
2723 rawsizefn=self.rawsize,
2758 rawsizefn=self.rawsize,
2724 revdifffn=self.revdiff,
2759 revdifffn=self.revdiff,
2725 flagsfn=self.flags,
2760 flagsfn=self.flags,
2726 deltamode=deltamode,
2761 deltamode=deltamode,
2727 revisiondata=revisiondata,
2762 revisiondata=revisiondata,
2728 assumehaveparentrevisions=assumehaveparentrevisions,
2763 assumehaveparentrevisions=assumehaveparentrevisions,
2729 sidedata_helpers=sidedata_helpers,
2764 sidedata_helpers=sidedata_helpers,
2730 )
2765 )
2731
2766
2732 DELTAREUSEALWAYS = b'always'
2767 DELTAREUSEALWAYS = b'always'
2733 DELTAREUSESAMEREVS = b'samerevs'
2768 DELTAREUSESAMEREVS = b'samerevs'
2734 DELTAREUSENEVER = b'never'
2769 DELTAREUSENEVER = b'never'
2735
2770
2736 DELTAREUSEFULLADD = b'fulladd'
2771 DELTAREUSEFULLADD = b'fulladd'
2737
2772
2738 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2773 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2739
2774
2740 def clone(
2775 def clone(
2741 self,
2776 self,
2742 tr,
2777 tr,
2743 destrevlog,
2778 destrevlog,
2744 addrevisioncb=None,
2779 addrevisioncb=None,
2745 deltareuse=DELTAREUSESAMEREVS,
2780 deltareuse=DELTAREUSESAMEREVS,
2746 forcedeltabothparents=None,
2781 forcedeltabothparents=None,
2747 sidedata_helpers=None,
2782 sidedata_helpers=None,
2748 ):
2783 ):
2749 """Copy this revlog to another, possibly with format changes.
2784 """Copy this revlog to another, possibly with format changes.
2750
2785
2751 The destination revlog will contain the same revisions and nodes.
2786 The destination revlog will contain the same revisions and nodes.
2752 However, it may not be bit-for-bit identical due to e.g. delta encoding
2787 However, it may not be bit-for-bit identical due to e.g. delta encoding
2753 differences.
2788 differences.
2754
2789
2755 The ``deltareuse`` argument controls how deltas from the existing revlog
2790 The ``deltareuse`` argument controls how deltas from the existing revlog
2756 are preserved in the destination revlog. The argument can have the
2791 are preserved in the destination revlog. The argument can have the
2757 following values:
2792 following values:
2758
2793
2759 DELTAREUSEALWAYS
2794 DELTAREUSEALWAYS
2760 Deltas will always be reused (if possible), even if the destination
2795 Deltas will always be reused (if possible), even if the destination
2761 revlog would not select the same revisions for the delta. This is the
2796 revlog would not select the same revisions for the delta. This is the
2762 fastest mode of operation.
2797 fastest mode of operation.
2763 DELTAREUSESAMEREVS
2798 DELTAREUSESAMEREVS
2764 Deltas will be reused if the destination revlog would pick the same
2799 Deltas will be reused if the destination revlog would pick the same
2765 revisions for the delta. This mode strikes a balance between speed
2800 revisions for the delta. This mode strikes a balance between speed
2766 and optimization.
2801 and optimization.
2767 DELTAREUSENEVER
2802 DELTAREUSENEVER
2768 Deltas will never be reused. This is the slowest mode of execution.
2803 Deltas will never be reused. This is the slowest mode of execution.
2769 This mode can be used to recompute deltas (e.g. if the diff/delta
2804 This mode can be used to recompute deltas (e.g. if the diff/delta
2770 algorithm changes).
2805 algorithm changes).
2771 DELTAREUSEFULLADD
2806 DELTAREUSEFULLADD
2772 Revisions will be re-added as if they were new content. This is
2807 Revisions will be re-added as if they were new content. This is
2773 slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
2808 slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
2774 e.g. large file detection and handling.
2809 e.g. large file detection and handling.
2775
2810
2776 Delta computation can be slow, so the choice of delta reuse policy can
2811 Delta computation can be slow, so the choice of delta reuse policy can
2777 significantly affect run time.
2812 significantly affect run time.
2778
2813
2779 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2814 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2780 two extremes. Deltas will be reused if they are appropriate. But if the
2815 two extremes. Deltas will be reused if they are appropriate. But if the
2781 delta could choose a better revision, it will do so. This means if you
2816 delta could choose a better revision, it will do so. This means if you
2782 are converting a non-generaldelta revlog to a generaldelta revlog,
2817 are converting a non-generaldelta revlog to a generaldelta revlog,
2783 deltas will be recomputed if the delta's parent isn't a parent of the
2818 deltas will be recomputed if the delta's parent isn't a parent of the
2784 revision.
2819 revision.
2785
2820
2786 In addition to the delta policy, the ``forcedeltabothparents``
2821 In addition to the delta policy, the ``forcedeltabothparents``
2787 argument controls whether to force compute deltas against both parents
2822 argument controls whether to force compute deltas against both parents
2788 for merges. If unset, the destination revlog's current setting is used.
2823 for merges. If unset, the destination revlog's current setting is used.
2789
2824
2790 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
2825 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
2791 `sidedata_helpers`.
2826 `sidedata_helpers`.
2792 """
2827 """
2793 if deltareuse not in self.DELTAREUSEALL:
2828 if deltareuse not in self.DELTAREUSEALL:
2794 raise ValueError(
2829 raise ValueError(
2795 _(b'value for deltareuse invalid: %s') % deltareuse
2830 _(b'value for deltareuse invalid: %s') % deltareuse
2796 )
2831 )
2797
2832
2798 if len(destrevlog):
2833 if len(destrevlog):
2799 raise ValueError(_(b'destination revlog is not empty'))
2834 raise ValueError(_(b'destination revlog is not empty'))
2800
2835
2801 if getattr(self, 'filteredrevs', None):
2836 if getattr(self, 'filteredrevs', None):
2802 raise ValueError(_(b'source revlog has filtered revisions'))
2837 raise ValueError(_(b'source revlog has filtered revisions'))
2803 if getattr(destrevlog, 'filteredrevs', None):
2838 if getattr(destrevlog, 'filteredrevs', None):
2804 raise ValueError(_(b'destination revlog has filtered revisions'))
2839 raise ValueError(_(b'destination revlog has filtered revisions'))
2805
2840
2806 # lazydelta and lazydeltabase control whether to reuse a cached delta,
2841 # lazydelta and lazydeltabase control whether to reuse a cached delta,
2807 # if possible.
2842 # if possible.
2808 oldlazydelta = destrevlog._lazydelta
2843 oldlazydelta = destrevlog._lazydelta
2809 oldlazydeltabase = destrevlog._lazydeltabase
2844 oldlazydeltabase = destrevlog._lazydeltabase
2810 oldamd = destrevlog._deltabothparents
2845 oldamd = destrevlog._deltabothparents
2811
2846
2812 try:
2847 try:
2813 if deltareuse == self.DELTAREUSEALWAYS:
2848 if deltareuse == self.DELTAREUSEALWAYS:
2814 destrevlog._lazydeltabase = True
2849 destrevlog._lazydeltabase = True
2815 destrevlog._lazydelta = True
2850 destrevlog._lazydelta = True
2816 elif deltareuse == self.DELTAREUSESAMEREVS:
2851 elif deltareuse == self.DELTAREUSESAMEREVS:
2817 destrevlog._lazydeltabase = False
2852 destrevlog._lazydeltabase = False
2818 destrevlog._lazydelta = True
2853 destrevlog._lazydelta = True
2819 elif deltareuse == self.DELTAREUSENEVER:
2854 elif deltareuse == self.DELTAREUSENEVER:
2820 destrevlog._lazydeltabase = False
2855 destrevlog._lazydeltabase = False
2821 destrevlog._lazydelta = False
2856 destrevlog._lazydelta = False
2822
2857
2823 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2858 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2824
2859
2825 self._clone(
2860 self._clone(
2826 tr,
2861 tr,
2827 destrevlog,
2862 destrevlog,
2828 addrevisioncb,
2863 addrevisioncb,
2829 deltareuse,
2864 deltareuse,
2830 forcedeltabothparents,
2865 forcedeltabothparents,
2831 sidedata_helpers,
2866 sidedata_helpers,
2832 )
2867 )
2833
2868
2834 finally:
2869 finally:
2835 destrevlog._lazydelta = oldlazydelta
2870 destrevlog._lazydelta = oldlazydelta
2836 destrevlog._lazydeltabase = oldlazydeltabase
2871 destrevlog._lazydeltabase = oldlazydeltabase
2837 destrevlog._deltabothparents = oldamd
2872 destrevlog._deltabothparents = oldamd
2838
2873
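A hypothetical invocation, e.g. to recompute every delta while upgrading a repository format (``tr``, ``src`` and ``dest`` are placeholders):

    # slowest but most thorough policy; see DELTAREUSENEVER above
    src.clone(tr, dest, deltareuse=src.DELTAREUSENEVER)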
2839 def _clone(
2874 def _clone(
2840 self,
2875 self,
2841 tr,
2876 tr,
2842 destrevlog,
2877 destrevlog,
2843 addrevisioncb,
2878 addrevisioncb,
2844 deltareuse,
2879 deltareuse,
2845 forcedeltabothparents,
2880 forcedeltabothparents,
2846 sidedata_helpers,
2881 sidedata_helpers,
2847 ):
2882 ):
2848 """perform the core duty of `revlog.clone` after parameter processing"""
2883 """perform the core duty of `revlog.clone` after parameter processing"""
2849 deltacomputer = deltautil.deltacomputer(destrevlog)
2884 deltacomputer = deltautil.deltacomputer(destrevlog)
2850 index = self.index
2885 index = self.index
2851 for rev in self:
2886 for rev in self:
2852 entry = index[rev]
2887 entry = index[rev]
2853
2888
2854 # Some classes override linkrev to take filtered revs into
2889 # Some classes override linkrev to take filtered revs into
2855 # account. Use raw entry from index.
2890 # account. Use raw entry from index.
2856 flags = entry[0] & 0xFFFF
2891 flags = entry[0] & 0xFFFF
2857 linkrev = entry[4]
2892 linkrev = entry[4]
2858 p1 = index[entry[5]][7]
2893 p1 = index[entry[5]][7]
2859 p2 = index[entry[6]][7]
2894 p2 = index[entry[6]][7]
2860 node = entry[7]
2895 node = entry[7]
2861
2896
2862 # (Possibly) reuse the delta from the revlog if allowed and
2897 # (Possibly) reuse the delta from the revlog if allowed and
2863 # the revlog chunk is a delta.
2898 # the revlog chunk is a delta.
2864 cachedelta = None
2899 cachedelta = None
2865 rawtext = None
2900 rawtext = None
2866 if deltareuse == self.DELTAREUSEFULLADD:
2901 if deltareuse == self.DELTAREUSEFULLADD:
2867 text, sidedata = self._revisiondata(rev)
2902 text, sidedata = self._revisiondata(rev)
2868
2903
2869 if sidedata_helpers is not None:
2904 if sidedata_helpers is not None:
2870 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
2905 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
2871 self, sidedata_helpers, sidedata, rev
2906 self, sidedata_helpers, sidedata, rev
2872 )
2907 )
2873 flags = flags | new_flags[0] & ~new_flags[1]
2908 flags = flags | new_flags[0] & ~new_flags[1]
2874
2909
2875 destrevlog.addrevision(
2910 destrevlog.addrevision(
2876 text,
2911 text,
2877 tr,
2912 tr,
2878 linkrev,
2913 linkrev,
2879 p1,
2914 p1,
2880 p2,
2915 p2,
2881 cachedelta=cachedelta,
2916 cachedelta=cachedelta,
2882 node=node,
2917 node=node,
2883 flags=flags,
2918 flags=flags,
2884 deltacomputer=deltacomputer,
2919 deltacomputer=deltacomputer,
2885 sidedata=sidedata,
2920 sidedata=sidedata,
2886 )
2921 )
2887 else:
2922 else:
2888 if destrevlog._lazydelta:
2923 if destrevlog._lazydelta:
2889 dp = self.deltaparent(rev)
2924 dp = self.deltaparent(rev)
2890 if dp != nullrev:
2925 if dp != nullrev:
2891 cachedelta = (dp, bytes(self._chunk(rev)))
2926 cachedelta = (dp, bytes(self._chunk(rev)))
2892
2927
2893 sidedata = None
2928 sidedata = None
2894 if not cachedelta:
2929 if not cachedelta:
2895 rawtext, sidedata = self._revisiondata(rev)
2930 rawtext, sidedata = self._revisiondata(rev)
2896 if sidedata is None:
2931 if sidedata is None:
2897 sidedata = self.sidedata(rev)
2932 sidedata = self.sidedata(rev)
2898
2933
2899 if sidedata_helpers is not None:
2934 if sidedata_helpers is not None:
2900 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
2935 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
2901 self, sidedata_helpers, sidedata, rev
2936 self, sidedata_helpers, sidedata, rev
2902 )
2937 )
2903 flags = flags | new_flags[0] & ~new_flags[1]
2938 flags = flags | new_flags[0] & ~new_flags[1]
2904
2939
2905 with destrevlog._writing(tr):
2940 with destrevlog._writing(tr):
2906 destrevlog._addrevision(
2941 destrevlog._addrevision(
2907 node,
2942 node,
2908 rawtext,
2943 rawtext,
2909 tr,
2944 tr,
2910 linkrev,
2945 linkrev,
2911 p1,
2946 p1,
2912 p2,
2947 p2,
2913 flags,
2948 flags,
2914 cachedelta,
2949 cachedelta,
2915 deltacomputer=deltacomputer,
2950 deltacomputer=deltacomputer,
2916 sidedata=sidedata,
2951 sidedata=sidedata,
2917 )
2952 )
2918
2953
2919 if addrevisioncb:
2954 if addrevisioncb:
2920 addrevisioncb(self, rev, node)
2955 addrevisioncb(self, rev, node)
2921
2956
2922 def censorrevision(self, tr, censornode, tombstone=b''):
2957 def censorrevision(self, tr, censornode, tombstone=b''):
2923 if self._format_version == REVLOGV0:
2958 if self._format_version == REVLOGV0:
2924 raise error.RevlogError(
2959 raise error.RevlogError(
2925 _(b'cannot censor with version %d revlogs')
2960 _(b'cannot censor with version %d revlogs')
2926 % self._format_version
2961 % self._format_version
2927 )
2962 )
2928
2963
2929 censorrev = self.rev(censornode)
2964 censorrev = self.rev(censornode)
2930 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2965 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2931
2966
2932 if len(tombstone) > self.rawsize(censorrev):
2967 if len(tombstone) > self.rawsize(censorrev):
2933 raise error.Abort(
2968 raise error.Abort(
2934 _(b'censor tombstone must be no longer than censored data')
2969 _(b'censor tombstone must be no longer than censored data')
2935 )
2970 )
2936
2971
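A minimal sketch of the framing ``storageutil.packmeta`` produces, assuming it wraps sorted ``key: value`` lines between ``\1\n`` markers; this framing is why the tombstone's packed form must fit within the censored revision's raw size:

    def packmeta(meta, text):
        # b'\x01\n' + sorted 'key: value' lines + b'\x01\n' + payload
        lines = [b'%s: %s\n' % (k, meta[k]) for k in sorted(meta)]
        return b'\x01\n%s\x01\n%s' % (b''.join(lines), text)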
2937 # Rewriting the revlog in place is hard. Our strategy for censoring is
2972 # Rewriting the revlog in place is hard. Our strategy for censoring is
2938 # to create a new revlog, copy all revisions to it, then replace the
2973 # to create a new revlog, copy all revisions to it, then replace the
2939 # revlogs on transaction close.
2974 # revlogs on transaction close.
2940 #
2975 #
2941 # This is a bit dangerous. We could easily have a mismatch of state.
2976 # This is a bit dangerous. We could easily have a mismatch of state.
2942 newrl = revlog(
2977 newrl = revlog(
2943 self.opener,
2978 self.opener,
2944 target=self.target,
2979 target=self.target,
2945 radix=self.radix,
2980 radix=self.radix,
2946 postfix=b'tmpcensored',
2981 postfix=b'tmpcensored',
2947 censorable=True,
2982 censorable=True,
2948 )
2983 )
2949 newrl._format_version = self._format_version
2984 newrl._format_version = self._format_version
2950 newrl._format_flags = self._format_flags
2985 newrl._format_flags = self._format_flags
2951 newrl._generaldelta = self._generaldelta
2986 newrl._generaldelta = self._generaldelta
2952 newrl._parse_index = self._parse_index
2987 newrl._parse_index = self._parse_index
2953
2988
2954 for rev in self.revs():
2989 for rev in self.revs():
2955 node = self.node(rev)
2990 node = self.node(rev)
2956 p1, p2 = self.parents(node)
2991 p1, p2 = self.parents(node)
2957
2992
2958 if rev == censorrev:
2993 if rev == censorrev:
2959 newrl.addrawrevision(
2994 newrl.addrawrevision(
2960 tombstone,
2995 tombstone,
2961 tr,
2996 tr,
2962 self.linkrev(censorrev),
2997 self.linkrev(censorrev),
2963 p1,
2998 p1,
2964 p2,
2999 p2,
2965 censornode,
3000 censornode,
2966 REVIDX_ISCENSORED,
3001 REVIDX_ISCENSORED,
2967 )
3002 )
2968
3003
2969 if newrl.deltaparent(rev) != nullrev:
3004 if newrl.deltaparent(rev) != nullrev:
2970 raise error.Abort(
3005 raise error.Abort(
2971 _(
3006 _(
2972 b'censored revision stored as delta; '
3007 b'censored revision stored as delta; '
2973 b'cannot censor'
3008 b'cannot censor'
2974 ),
3009 ),
2975 hint=_(
3010 hint=_(
2976 b'censoring of revlogs is not '
3011 b'censoring of revlogs is not '
2977 b'fully implemented; please report '
3012 b'fully implemented; please report '
2978 b'this bug'
3013 b'this bug'
2979 ),
3014 ),
2980 )
3015 )
2981 continue
3016 continue
2982
3017
2983 if self.iscensored(rev):
3018 if self.iscensored(rev):
2984 if self.deltaparent(rev) != nullrev:
3019 if self.deltaparent(rev) != nullrev:
2985 raise error.Abort(
3020 raise error.Abort(
2986 _(
3021 _(
2987 b'cannot censor due to censored '
3022 b'cannot censor due to censored '
2988 b'revision having delta stored'
3023 b'revision having delta stored'
2989 )
3024 )
2990 )
3025 )
2991 rawtext = self._chunk(rev)
3026 rawtext = self._chunk(rev)
2992 else:
3027 else:
2993 rawtext = self.rawdata(rev)
3028 rawtext = self.rawdata(rev)
2994
3029
2995 newrl.addrawrevision(
3030 newrl.addrawrevision(
2996 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
3031 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
2997 )
3032 )
2998
3033
2999 tr.addbackup(self._indexfile, location=b'store')
3034 tr.addbackup(self._indexfile, location=b'store')
3000 if not self._inline:
3035 if not self._inline:
3001 tr.addbackup(self._datafile, location=b'store')
3036 tr.addbackup(self._datafile, location=b'store')
3002
3037
3003 self.opener.rename(newrl._indexfile, self._indexfile)
3038 self.opener.rename(newrl._indexfile, self._indexfile)
3004 if not self._inline:
3039 if not self._inline:
3005 self.opener.rename(newrl._datafile, self._datafile)
3040 self.opener.rename(newrl._datafile, self._datafile)
3006
3041
3007 self.clearcaches()
3042 self.clearcaches()
3008 self._loadindex()
3043 self._loadindex()
3009
3044
3010 def verifyintegrity(self, state):
3045 def verifyintegrity(self, state):
3011 """Verifies the integrity of the revlog.
3046 """Verifies the integrity of the revlog.
3012
3047
3013 Yields ``revlogproblem`` instances describing problems that are
3048 Yields ``revlogproblem`` instances describing problems that are
3014 found.
3049 found.
3015 """
3050 """
3016 dd, di = self.checksize()
3051 dd, di = self.checksize()
3017 if dd:
3052 if dd:
3018 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3053 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3019 if di:
3054 if di:
3020 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3055 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3021
3056
3022 version = self._format_version
3057 version = self._format_version
3023
3058
3024 # The verifier tells us what version revlog we should be.
3059 # The verifier tells us what version revlog we should be.
3025 if version != state[b'expectedversion']:
3060 if version != state[b'expectedversion']:
3026 yield revlogproblem(
3061 yield revlogproblem(
3027 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3062 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3028 % (self.display_id, version, state[b'expectedversion'])
3063 % (self.display_id, version, state[b'expectedversion'])
3029 )
3064 )
3030
3065
3031 state[b'skipread'] = set()
3066 state[b'skipread'] = set()
3032 state[b'safe_renamed'] = set()
3067 state[b'safe_renamed'] = set()
3033
3068
3034 for rev in self:
3069 for rev in self:
3035 node = self.node(rev)
3070 node = self.node(rev)
3036
3071
3037 # Verify contents. 4 cases to care about:
3072 # Verify contents. 4 cases to care about:
3038 #
3073 #
3039 # common: the most common case
3074 # common: the most common case
3040 # rename: with a rename
3075 # rename: with a rename
3041 # meta: file content starts with b'\1\n', the metadata
3076 # meta: file content starts with b'\1\n', the metadata
3042 # header defined in filelog.py, but without a rename
3077 # header defined in filelog.py, but without a rename
3043 # ext: content stored externally
3078 # ext: content stored externally
3044 #
3079 #
3045 # More formally, their differences are shown below:
3080 # More formally, their differences are shown below:
3046 #
3081 #
3047 # | common | rename | meta | ext
3082 # | common | rename | meta | ext
3048 # -------------------------------------------------------
3083 # -------------------------------------------------------
3049 # flags() | 0 | 0 | 0 | not 0
3084 # flags() | 0 | 0 | 0 | not 0
3050 # renamed() | False | True | False | ?
3085 # renamed() | False | True | False | ?
3051 # rawtext[0:2]=='\1\n'| False | True | True | ?
3086 # rawtext[0:2]=='\1\n'| False | True | True | ?
3052 #
3087 #
3053 # "rawtext" means the raw text stored in revlog data, which
3088 # "rawtext" means the raw text stored in revlog data, which
3054 # could be retrieved by "rawdata(rev)". "text"
3089 # could be retrieved by "rawdata(rev)". "text"
3055 # mentioned below is "revision(rev)".
3090 # mentioned below is "revision(rev)".
3056 #
3091 #
3057 # There are 3 different lengths stored physically:
3092 # There are 3 different lengths stored physically:
3058 # 1. L1: rawsize, stored in revlog index
3093 # 1. L1: rawsize, stored in revlog index
3059 # 2. L2: len(rawtext), stored in revlog data
3094 # 2. L2: len(rawtext), stored in revlog data
3060 # 3. L3: len(text), stored in revlog data if flags==0, or
3095 # 3. L3: len(text), stored in revlog data if flags==0, or
3061 # possibly somewhere else if flags!=0
3096 # possibly somewhere else if flags!=0
3062 #
3097 #
3063 # L1 should be equal to L2. L3 could be different from them.
3098 # L1 should be equal to L2. L3 could be different from them.
3064 # "text" may or may not affect commit hash depending on flag
3099 # "text" may or may not affect commit hash depending on flag
3065 # processors (see flagutil.addflagprocessor).
3100 # processors (see flagutil.addflagprocessor).
3066 #
3101 #
3067 # | common | rename | meta | ext
3102 # | common | rename | meta | ext
3068 # -------------------------------------------------
3103 # -------------------------------------------------
3069 # rawsize() | L1 | L1 | L1 | L1
3104 # rawsize() | L1 | L1 | L1 | L1
3070 # size() | L1 | L2-LM | L1(*) | L1 (?)
3105 # size() | L1 | L2-LM | L1(*) | L1 (?)
3071 # len(rawtext) | L2 | L2 | L2 | L2
3106 # len(rawtext) | L2 | L2 | L2 | L2
3072 # len(text) | L2 | L2 | L2 | L3
3107 # len(text) | L2 | L2 | L2 | L3
3073 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3108 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3074 #
3109 #
3075 # LM: length of metadata, depending on rawtext
3110 # LM: length of metadata, depending on rawtext
3076 # (*): not ideal, see comment in filelog.size
3111 # (*): not ideal, see comment in filelog.size
3077 # (?): could be "- len(meta)" if the resolved content has
3112 # (?): could be "- len(meta)" if the resolved content has
3078 # rename metadata
3113 # rename metadata
3079 #
3114 #
3080 # Checks needed to be done:
3115 # Checks needed to be done:
3081 # 1. length check: L1 == L2, in all cases.
3116 # 1. length check: L1 == L2, in all cases.
3082 # 2. hash check: depending on flag processor, we may need to
3117 # 2. hash check: depending on flag processor, we may need to
3083 # use either "text" (external), or "rawtext" (in revlog).
3118 # use either "text" (external), or "rawtext" (in revlog).
3084
3119
3085 try:
3120 try:
3086 skipflags = state.get(b'skipflags', 0)
3121 skipflags = state.get(b'skipflags', 0)
3087 if skipflags:
3122 if skipflags:
3088 skipflags &= self.flags(rev)
3123 skipflags &= self.flags(rev)
3089
3124
3090 _verify_revision(self, skipflags, state, node)
3125 _verify_revision(self, skipflags, state, node)
3091
3126
3092 l1 = self.rawsize(rev)
3127 l1 = self.rawsize(rev)
3093 l2 = len(self.rawdata(node))
3128 l2 = len(self.rawdata(node))
3094
3129
3095 if l1 != l2:
3130 if l1 != l2:
3096 yield revlogproblem(
3131 yield revlogproblem(
3097 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3132 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3098 node=node,
3133 node=node,
3099 )
3134 )
3100
3135
3101 except error.CensoredNodeError:
3136 except error.CensoredNodeError:
3102 if state[b'erroroncensored']:
3137 if state[b'erroroncensored']:
3103 yield revlogproblem(
3138 yield revlogproblem(
3104 error=_(b'censored file data'), node=node
3139 error=_(b'censored file data'), node=node
3105 )
3140 )
3106 state[b'skipread'].add(node)
3141 state[b'skipread'].add(node)
3107 except Exception as e:
3142 except Exception as e:
3108 yield revlogproblem(
3143 yield revlogproblem(
3109 error=_(b'unpacking %s: %s')
3144 error=_(b'unpacking %s: %s')
3110 % (short(node), stringutil.forcebytestr(e)),
3145 % (short(node), stringutil.forcebytestr(e)),
3111 node=node,
3146 node=node,
3112 )
3147 )
3113 state[b'skipread'].add(node)
3148 state[b'skipread'].add(node)
3114
3149
3115 def storageinfo(
3150 def storageinfo(
3116 self,
3151 self,
3117 exclusivefiles=False,
3152 exclusivefiles=False,
3118 sharedfiles=False,
3153 sharedfiles=False,
3119 revisionscount=False,
3154 revisionscount=False,
3120 trackedsize=False,
3155 trackedsize=False,
3121 storedsize=False,
3156 storedsize=False,
3122 ):
3157 ):
3123 d = {}
3158 d = {}
3124
3159
3125 if exclusivefiles:
3160 if exclusivefiles:
3126 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3161 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3127 if not self._inline:
3162 if not self._inline:
3128 d[b'exclusivefiles'].append((self.opener, self._datafile))
3163 d[b'exclusivefiles'].append((self.opener, self._datafile))
3129
3164
3130 if sharedfiles:
3165 if sharedfiles:
3131 d[b'sharedfiles'] = []
3166 d[b'sharedfiles'] = []
3132
3167
3133 if revisionscount:
3168 if revisionscount:
3134 d[b'revisionscount'] = len(self)
3169 d[b'revisionscount'] = len(self)
3135
3170
3136 if trackedsize:
3171 if trackedsize:
3137 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3172 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3138
3173
3139 if storedsize:
3174 if storedsize:
3140 d[b'storedsize'] = sum(
3175 d[b'storedsize'] = sum(
3141 self.opener.stat(path).st_size for path in self.files()
3176 self.opener.stat(path).st_size for path in self.files()
3142 )
3177 )
3143
3178
3144 return d
3179 return d
3145
3180
3146 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3181 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3147 if not self.hassidedata:
3182 if not self.hassidedata:
3148 return
3183 return
3149 # revlog formats with sidedata support do not support inline
3184 # revlog formats with sidedata support do not support inline
3150 assert not self._inline
3185 assert not self._inline
3151 if not helpers[1] and not helpers[2]:
3186 if not helpers[1] and not helpers[2]:
3152 # Nothing to generate or remove
3187 # Nothing to generate or remove
3153 return
3188 return
3154
3189
3155 # the changelog implements a "delayed" writing mechanism that assumes
3190 # the changelog implements a "delayed" writing mechanism that assumes
3156 # all index data is written in append mode and is therefore incompatible
3191 # all index data is written in append mode and is therefore incompatible
3157 # with the seeked writes done in this method. The use of such "delayed"
3192 # with the seeked writes done in this method. The use of such "delayed"
3158 # writing will soon be removed for revlog versions that support side
3193 # writing will soon be removed for revlog versions that support side
3159 # data, so for now, we only keep this simple assert to highlight the
3194 # data, so for now, we only keep this simple assert to highlight the
3160 # situation.
3195 # situation.
3161 delayed = getattr(self, '_delayed', False)
3196 delayed = getattr(self, '_delayed', False)
3162 diverted = getattr(self, '_divert', False)
3197 diverted = getattr(self, '_divert', False)
3163 if delayed and not diverted:
3198 if delayed and not diverted:
3164 msg = "cannot rewrite_sidedata of a delayed revlog"
3199 msg = "cannot rewrite_sidedata of a delayed revlog"
3165 raise error.ProgrammingError(msg)
3200 raise error.ProgrammingError(msg)
3166
3201
3167 new_entries = []
3202 new_entries = []
3168 # append the new sidedata
3203 # append the new sidedata
3169 with self._writing(transaction):
3204 with self._writing(transaction):
3170 ifh, dfh = self._writinghandles
3205 ifh, dfh = self._writinghandles
3171 dfh.seek(0, os.SEEK_END)
3206 dfh.seek(0, os.SEEK_END)
3172 current_offset = dfh.tell()
3207 current_offset = dfh.tell()
3173 for rev in range(startrev, endrev + 1):
3208 for rev in range(startrev, endrev + 1):
3174 entry = self.index[rev]
3209 entry = self.index[rev]
3175 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3210 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3176 store=self,
3211 store=self,
3177 sidedata_helpers=helpers,
3212 sidedata_helpers=helpers,
3178 sidedata={},
3213 sidedata={},
3179 rev=rev,
3214 rev=rev,
3180 )
3215 )
3181
3216
3182 serialized_sidedata = sidedatautil.serialize_sidedata(
3217 serialized_sidedata = sidedatautil.serialize_sidedata(
3183 new_sidedata
3218 new_sidedata
3184 )
3219 )
3185 if entry[8] != 0 or entry[9] != 0:
3220 if entry[8] != 0 or entry[9] != 0:
3186 # rewriting entries that already have sidedata is not
3221 # rewriting entries that already have sidedata is not
3187 # supported yet, because it introduces garbage data in the
3222 # supported yet, because it introduces garbage data in the
3188 # revlog.
3223 # revlog.
3189 msg = b"rewriting existing sidedata is not supported yet"
3224 msg = b"rewriting existing sidedata is not supported yet"
3190 raise error.Abort(msg)
3225 raise error.Abort(msg)
3191
3226
3192 # Apply (potential) flags to add and to remove after running
3227 # Apply (potential) flags to add and to remove after running
3193 # the sidedata helpers
3228 # the sidedata helpers
3194 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3229 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3195 entry = (new_offset_flags,) + entry[1:8]
3230 entry = (new_offset_flags,) + entry[1:8]
3196 entry += (current_offset, len(serialized_sidedata))
3231 entry += (current_offset, len(serialized_sidedata))
3197
3232
3198 # the sidedata computation might have moved the file cursors around
3233 # the sidedata computation might have moved the file cursors around
3199 dfh.seek(current_offset, os.SEEK_SET)
3234 dfh.seek(current_offset, os.SEEK_SET)
3200 dfh.write(serialized_sidedata)
3235 dfh.write(serialized_sidedata)
3201 new_entries.append(entry)
3236 new_entries.append(entry)
3202 current_offset += len(serialized_sidedata)
3237 current_offset += len(serialized_sidedata)
3203
3238
3204 # rewrite the new index entries
3239 # rewrite the new index entries
3205 ifh.seek(startrev * self.index.entry_size)
3240 ifh.seek(startrev * self.index.entry_size)
3206 for i, e in enumerate(new_entries):
3241 for i, e in enumerate(new_entries):
3207 rev = startrev + i
3242 rev = startrev + i
3208 self.index.replace_sidedata_info(rev, e[8], e[9], e[0])
3243 self.index.replace_sidedata_info(rev, e[8], e[9], e[0])
3209 packed = self.index.entry_binary(rev)
3244 packed = self.index.entry_binary(rev)
3210 if rev == 0 and self._docket is None:
3245 if rev == 0 and self._docket is None:
3211 header = self._format_flags | self._format_version
3246 header = self._format_flags | self._format_version
3212 header = self.index.pack_header(header)
3247 header = self.index.pack_header(header)
3213 packed = header + packed
3248 packed = header + packed
3214 ifh.write(packed)
3249 ifh.write(packed)
@@ -1,80 +1,100 b''
1 # docket - code related to revlog "docket"
1 # docket - code related to revlog "docket"
2 #
2 #
3 # Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 ### Revlog docket file
8 ### Revlog docket file
9 #
9 #
10 # The revlog is stored on disk using multiple files:
10 # The revlog is stored on disk using multiple files:
11 #
11 #
12 # * a small docket file, containing metadata and a pointer,
12 # * a small docket file, containing metadata and a pointer,
13 #
13 #
14 # * an index file, containing fixed width information about revisions,
14 # * an index file, containing fixed width information about revisions,
15 #
15 #
16 # * a data file, containing variable width data for these revisions,
16 # * a data file, containing variable width data for these revisions,
17
17
18 from __future__ import absolute_import
18 from __future__ import absolute_import
19
19
20 import struct
20 import struct
21
21
22 from . import (
22 from . import (
23 constants,
23 constants,
24 )
24 )
25
25
26 # Docket format
26 # Docket format
27 #
27 #
28 # * 4 bytes: revlog version
28 # * 4 bytes: revlog version
29 # | This is mandatory as docket must be compatible with the previous
29 # | This is mandatory as docket must be compatible with the previous
30 # | revlog index header.
30 # | revlog index header.
31 S_HEADER = struct.Struct(constants.INDEX_HEADER.format)
31 # * 4 bytes: size of index data
32 S_HEADER = struct.Struct(constants.INDEX_HEADER.format + 'L')
32
33
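A round-trip of this header, assuming ``constants.INDEX_HEADER.format`` is the classic big-endian ``'>I'`` version word, which makes the appended ``'L'`` a 4-byte big-endian size field:

    import struct

    S_HEADER = struct.Struct('>I' + 'L')      # version word + index size
    blob = S_HEADER.pack(0x00010002, 4096)    # illustrative values only
    version_header, index_end = S_HEADER.unpack(blob)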
33
34
34 class RevlogDocket(object):
35 class RevlogDocket(object):
35 """metadata associated with revlog"""
36 """metadata associated with revlog"""
36
37
37 def __init__(self, revlog, version_header=None):
38 def __init__(self, revlog, version_header=None, index_end=0):
38 self._version_header = version_header
39 self._version_header = version_header
39 self._dirty = False
40 self._dirty = False
40 self._radix = revlog.radix
41 self._radix = revlog.radix
41 self._path = revlog._docket_file
42 self._path = revlog._docket_file
42 self._opener = revlog.opener
43 self._opener = revlog.opener
44 self._index_end = index_end
43
45
44 def index_filepath(self):
46 def index_filepath(self):
45 """file path to the current index file associated to this docket"""
47 """file path to the current index file associated to this docket"""
46 # very simplistic version at first
48 # very simplistic version at first
47 return b"%s.idx" % self._radix
49 return b"%s.idx" % self._radix
48
50
49 def write(self, transaction):
51 @property
52 def index_end(self):
53 return self._index_end
54
55 @index_end.setter
56 def index_end(self, new_size):
57 if new_size != self._index_end:
58 self._index_end = new_size
59 self._dirty = True
60
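The setter is what lets the revlog simply assign after each write, as the first hunk of this patch does; schematically (``ifh`` and ``docket`` as in the revlog code):

    ifh.write(entry)                 # append the new index entry
    docket.index_end = ifh.tell()    # goes through the setter, marks docket dirty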
61 def write(self, transaction, stripping=False):
50 """write the modification of disk if any
62 """write the modification of disk if any
51
63
52 This makes the new content visible to all processes"""
64 This makes the new content visible to all processes"""
53 if self._dirty:
65 if self._dirty:
66 if not stripping:
67 # XXX we could leverage the docket while stripping. However it
68 # is not powerful enough at the time of this comment
54 transaction.addbackup(self._path, location=b'store')
69 transaction.addbackup(self._path, location=b'store')
55 with self._opener(self._path, mode=b'w', atomictemp=True) as f:
70 with self._opener(self._path, mode=b'w', atomictemp=True) as f:
56 f.write(self._serialize())
71 f.write(self._serialize())
57 self._dirty = False
72 self._dirty = False
58
73
59 def _serialize(self):
74 def _serialize(self):
60 return S_HEADER.pack(self._version_header)
75 data = (
76 self._version_header,
77 self._index_end,
78 )
79 return S_HEADER.pack(*data)
61
80
62
81
63 def default_docket(revlog, version_header):
82 def default_docket(revlog, version_header):
64 """given a revlog version a new docket object for the given revlog"""
83 """given a revlog version a new docket object for the given revlog"""
65 if (version_header & 0xFFFF) != constants.REVLOGV2:
84 if (version_header & 0xFFFF) != constants.REVLOGV2:
66 return None
85 return None
67 docket = RevlogDocket(revlog, version_header=version_header)
86 docket = RevlogDocket(revlog, version_header=version_header)
68 docket._dirty = True
87 docket._dirty = True
69 return docket
88 return docket
70
89
71
90
72 def parse_docket(revlog, data):
91 def parse_docket(revlog, data):
73 """given some docket data return a docket object for the given revlog"""
92 """given some docket data return a docket object for the given revlog"""
74 header = S_HEADER.unpack(data[: S_HEADER.size])
93 header = S_HEADER.unpack(data[: S_HEADER.size])
75 (version_header,) = header
94 version_header, index_size = header
76 docket = RevlogDocket(
95 docket = RevlogDocket(
77 revlog,
96 revlog,
78 version_header=version_header,
97 version_header=version_header,
98 index_end=index_size,
79 )
99 )
80 return docket
100 return docket
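End to end, the new field survives a serialize/parse round-trip; a sketch poking at internals purely for illustration:

    docket.index_end = 4096              # setter marks the docket dirty
    blob = docket._serialize()           # version header + index size
    restored = parse_docket(revlog, blob)
    assert restored.index_end == 4096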