changelog-v2: add a configuration to disable rank computation...
marmoute - r50558:45d7b8c3 default
@@ -1,2901 +1,2908 b''
# configitems.py - centralized declaration of configuration options
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import functools
import re

from . import (
    encoding,
    error,
)


def loadconfigtable(ui, extname, configtable):
    """update config items known to the ui with the extension ones"""
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            msg = b"extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config=b'warn-config')

        knownitems.update(items)

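# Illustrative sketch of the convention loadconfigtable() consumes: an
# extension declares a module-level `configtable` dict, usually filled
# through registrar.configitem(). The section and option names below are
# hypothetical, for illustration only:
#
#     from mercurial import registrar
#
#     configtable = {}
#     configitem = registrar.configitem(configtable)
#
#     configitem(
#         b'myext',
#         b'some-knob',
#         default=False,
#     )
#
# At extension load time, Mercurial calls
# loadconfigtable(ui, b'myext', configtable) to merge those declarations
# into ui._knownconfig, warning when an extension overwrites a known item.
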
class configitem:
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, match the name using a regular
              expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        self._re = None
        if generic:
            self._re = re.compile(self.name)


class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        baseitem = super(itemregister, self).get(key)
        if baseitem is not None and not baseitem.generic:
            return baseitem

        # search for a matching generic item
        generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
        for item in generics:
            # we use 'match' instead of 'search' to make the matching simpler
            # for people unfamiliar with regular expressions. Having the match
            # rooted to the start of the string will produce less surprising
            # results for users writing simple regexes for sub-attributes.
            #
            # For example, using "color\..*" with match produces an
            # unsurprising result, while using search could suddenly match
            # apparently unrelated configuration that happens to contain
            # "color." anywhere. This is a tradeoff where we favor requiring
            # ".*" on some patterns to avoid the need to prefix most patterns
            # with "^". The "^" seems more error-prone.
            if item._re.match(key):
                return item

        return None

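# Illustrative sketch of why get() uses match() rather than search() for
# generic items (the keys below are hypothetical):
#
#     import re
#
#     pattern = re.compile(b'color\\..*')
#     assert pattern.match(b'color.diff.inserted')        # intended hit
#     assert pattern.match(b'ui.color.debug') is None     # anchored: no hit
#     assert re.search(b'color\\..*', b'ui.color.debug')  # search would hit
#
# search() would accept any key merely containing "color.", which is the
# surprising behavior the comment above warns about.
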
coreitems = {}


def _register(configtable, *args, **kwargs):
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = b"duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item


# special value for cases where the default is derived from other values
dynamicdefault = object()

# Registering actual config items


def getitemregister(configtable):
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f


coreconfigitem = getitemregister(coreitems)

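# Illustrative sketch of how the helpers above combine (the section and
# option names are hypothetical): coreconfigitem is a functools.partial
# binding _register to the coreitems table, so a declaration such as
#
#     coreconfigitem(
#         b'example-section',
#         b'example-option',
#         default=dynamicdefault,  # resolved later from other config values
#     )
#
# builds a configitem and files it under coreitems[b'example-section'];
# registering the same section/name twice raises error.ProgrammingError.
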
def _registerdiffopts(section, configprefix=b''):
    coreconfigitem(
        section,
        configprefix + b'nodates',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'showfunc',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'unified',
        default=None,
    )
    coreconfigitem(
        section,
        configprefix + b'git',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorews',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorewsamount',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignoreblanklines',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorewseol',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'nobinary',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'noprefix',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'word-diff',
        default=False,
    )

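# Illustrative note: _registerdiffopts simply concatenates the prefix to
# each diff option name, so a call such as
#
#     _registerdiffopts(section=b'commands',
#                       configprefix=b'commit.interactive.')
#
# registers b'commands.commit.interactive.nodates',
# b'commands.commit.interactive.showfunc', and so on.
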
coreconfigitem(
    b'alias',
    b'.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'auth',
    b'cookiefile',
    default=None,
)
_registerdiffopts(section=b'annotate')
# bookmarks.pushing: internal hack for discovery
coreconfigitem(
    b'bookmarks',
    b'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem(
    b'bundle',
    b'mainreporoot',
    default=b'',
)
coreconfigitem(
    b'censor',
    b'policy',
    default=b'abort',
    experimental=True,
)
coreconfigitem(
    b'chgserver',
    b'idletimeout',
    default=3600,
)
coreconfigitem(
    b'chgserver',
    b'skiphash',
    default=False,
)
coreconfigitem(
    b'cmdserver',
    b'log',
    default=None,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-files',
    default=7,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-size',
    default=b'1 MB',
)
coreconfigitem(
    b'cmdserver',
    b'max-repo-cache',
    default=0,
    experimental=True,
)
coreconfigitem(
    b'cmdserver',
    b'message-encodings',
    default=list,
)
coreconfigitem(
    b'cmdserver',
    b'track-log',
    default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
)
coreconfigitem(
    b'cmdserver',
    b'shutdown-on-interrupt',
    default=True,
)
coreconfigitem(
    b'color',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'color',
    b'mode',
    default=b'auto',
)
coreconfigitem(
    b'color',
    b'pagermode',
    default=dynamicdefault,
)
coreconfigitem(
    b'command-templates',
    b'graphnode',
    default=None,
    alias=[(b'ui', b'graphnodetemplate')],
)
coreconfigitem(
    b'command-templates',
    b'log',
    default=None,
    alias=[(b'ui', b'logtemplate')],
)
coreconfigitem(
    b'command-templates',
    b'mergemarker',
    default=(
        b'{node|short} '
        b'{ifeq(tags, "tip", "", '
        b'ifeq(tags, "", "", "{tags} "))}'
        b'{if(bookmarks, "{bookmarks} ")}'
        b'{ifeq(branch, "default", "", "{branch} ")}'
        b'- {author|user}: {desc|firstline}'
    ),
    alias=[(b'ui', b'mergemarkertemplate')],
)
coreconfigitem(
    b'command-templates',
    b'pre-merge-tool-output',
    default=None,
    alias=[(b'ui', b'pre-merge-tool-output-template')],
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary',
    default=None,
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary.*',
    default=dynamicdefault,
    generic=True,
)
_registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
coreconfigitem(
    b'commands',
    b'commit.post-status',
    default=False,
)
coreconfigitem(
    b'commands',
    b'grep.all-files',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'merge.require-rev',
    default=False,
)
coreconfigitem(
    b'commands',
    b'push.require-revs',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.confirm',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.explicit-re-merge',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.mark-check',
    default=b'none',
)
_registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
coreconfigitem(
    b'commands',
    b'show.aliasprefix',
    default=list,
)
coreconfigitem(
    b'commands',
    b'status.relative',
    default=False,
)
coreconfigitem(
    b'commands',
    b'status.skipstates',
    default=[],
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'status.terse',
    default=b'',
)
coreconfigitem(
    b'commands',
    b'status.verbose',
    default=False,
)
coreconfigitem(
    b'commands',
    b'update.check',
    default=None,
)
coreconfigitem(
    b'commands',
    b'update.requiredest',
    default=False,
)
coreconfigitem(
    b'committemplate',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'convert',
    b'bzr.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.cache',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.fuzz',
    default=60,
)
coreconfigitem(
    b'convert',
    b'cvsps.logencoding',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergefrom',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergeto',
    default=None,
)
coreconfigitem(
    b'convert',
    b'git.committeractions',
    default=lambda: [b'messagedifferent'],
)
coreconfigitem(
    b'convert',
    b'git.extrakeys',
    default=list,
)
coreconfigitem(
    b'convert',
    b'git.findcopiesharder',
    default=False,
)
coreconfigitem(
    b'convert',
    b'git.remoteprefix',
    default=b'remote',
)
coreconfigitem(
    b'convert',
    b'git.renamelimit',
    default=400,
)
coreconfigitem(
    b'convert',
    b'git.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'git.similarity',
    default=50,
)
coreconfigitem(
    b'convert',
    b'git.skipsubmodules',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.clonebranches',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.ignoreerrors',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.preserve-hash',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.revs',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.saverev',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.sourcename',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.startrev',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.tagsbranch',
    default=b'default',
)
coreconfigitem(
    b'convert',
    b'hg.usebranchnames',
    default=True,
)
coreconfigitem(
    b'convert',
    b'ignoreancestorcheck',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'convert',
    b'localtimezone',
    default=False,
)
coreconfigitem(
    b'convert',
    b'p4.encoding',
    default=dynamicdefault,
)
coreconfigitem(
    b'convert',
    b'p4.startrev',
    default=0,
)
coreconfigitem(
    b'convert',
    b'skiptags',
    default=False,
)
coreconfigitem(
    b'convert',
    b'svn.debugsvnlog',
    default=True,
)
coreconfigitem(
    b'convert',
    b'svn.trunk',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.tags',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.branches',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.startrev',
    default=0,
)
coreconfigitem(
    b'convert',
    b'svn.dangerous-set-commit-dates',
    default=False,
)
coreconfigitem(
    b'debug',
    b'dirstate.delaywrite',
    default=0,
)
coreconfigitem(
    b'debug',
    b'revlog.verifyposition.changelog',
    default=b'',
)
coreconfigitem(
    b'debug',
    b'revlog.debug-delta',
    default=False,
)
# display extra information about the bundling process
coreconfigitem(
    b'debug',
    b'bundling-stats',
    default=False,
)
# display extra information about the unbundling process
coreconfigitem(
    b'debug',
    b'unbundling-stats',
    default=False,
)
coreconfigitem(
    b'defaults',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'devel',
    b'all-warnings',
    default=False,
)
coreconfigitem(
    b'devel',
    b'bundle2.debug',
    default=False,
)
coreconfigitem(
    b'devel',
    b'bundle.delta',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'cache-vfs',
    default=None,
)
coreconfigitem(
    b'devel',
    b'check-locks',
    default=False,
)
coreconfigitem(
    b'devel',
    b'check-relroot',
    default=False,
)
# Track copy information for all files, not just "added" ones (very slow)
coreconfigitem(
    b'devel',
    b'copy-tracing.trace-all-files',
    default=False,
)
coreconfigitem(
    b'devel',
    b'default-date',
    default=None,
)
coreconfigitem(
    b'devel',
    b'deprec-warn',
    default=False,
)
coreconfigitem(
    b'devel',
    b'disableloaddefaultcerts',
    default=False,
)
coreconfigitem(
    b'devel',
    b'warn-empty-changegroup',
    default=False,
)
coreconfigitem(
    b'devel',
    b'legacy.exchange',
    default=list,
)
# When True, revlogs use a special reference version of the nodemap, which is
# not performant but is "known" to behave properly.
coreconfigitem(
    b'devel',
    b'persistent-nodemap',
    default=False,
)
coreconfigitem(
    b'devel',
    b'servercafile',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'serverexactprotocol',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'serverrequirecert',
    default=False,
)
coreconfigitem(
    b'devel',
    b'strip-obsmarkers',
    default=True,
)
coreconfigitem(
    b'devel',
    b'warn-config',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-default',
    default=None,
)
coreconfigitem(
    b'devel',
    b'user.obsmarker',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-unknown',
    default=None,
)
coreconfigitem(
    b'devel',
    b'debug.copies',
    default=False,
)
coreconfigitem(
    b'devel',
    b'copy-tracing.multi-thread',
    default=True,
)
coreconfigitem(
    b'devel',
    b'debug.extensions',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.repo-filters',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.peer-request',
    default=False,
)
# If discovery.exchange-heads is False, the discovery will not start with
# remote head fetching and local head querying.
coreconfigitem(
    b'devel',
    b'discovery.exchange-heads',
    default=True,
)
# If discovery.grow-sample is False, the sample size used in set discovery
# will not be increased through the process.
coreconfigitem(
    b'devel',
    b'discovery.grow-sample',
    default=True,
)
# When discovery.grow-sample.dynamic is True (the default), the sample size is
# adapted to the shape of the undecided set: it is set to
# max(<target-size>, len(roots(undecided)), len(heads(undecided))).
coreconfigitem(
    b'devel',
    b'discovery.grow-sample.dynamic',
    default=True,
)
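# Illustrative sketch of the dynamic sizing described above (assumed
# shape, not the exact setdiscovery code):
#
#     def sample_size(target_size, undecided_roots, undecided_heads):
#         return max(target_size,
#                    len(undecided_roots),
#                    len(undecided_heads))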
# discovery.grow-sample.rate controls the rate at which the sample grows
coreconfigitem(
    b'devel',
    b'discovery.grow-sample.rate',
    default=1.05,
)
# If discovery.randomize is False, random sampling during discovery is
# deterministic. It is meant for integration tests.
coreconfigitem(
    b'devel',
    b'discovery.randomize',
    default=True,
)
# Control the initial size of the discovery sample
coreconfigitem(
    b'devel',
    b'discovery.sample-size',
    default=200,
)
# Control the initial size of the discovery sample for the initial change
coreconfigitem(
    b'devel',
    b'discovery.sample-size.initial',
    default=100,
)
_registerdiffopts(section=b'diff')
coreconfigitem(
    b'diff',
    b'merge',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'email',
    b'bcc',
    default=None,
)
coreconfigitem(
    b'email',
    b'cc',
    default=None,
)
coreconfigitem(
    b'email',
    b'charsets',
    default=list,
)
coreconfigitem(
    b'email',
    b'from',
    default=None,
)
coreconfigitem(
    b'email',
    b'method',
    default=b'smtp',
)
coreconfigitem(
    b'email',
    b'reply-to',
    default=None,
)
coreconfigitem(
    b'email',
    b'to',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'archivemetatemplate',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'auto-publish',
    default=b'publish',
)
coreconfigitem(
    b'experimental',
    b'bundle-phases',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2-advertise',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'bundle2-output-capture',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2.pushback',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2lazylocking',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.bzip2',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.gzip',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.none',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.zstd',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.bzip2',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.gzip',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.none',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.zstd',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'changegroup3',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'changegroup4',
    default=False,
)
+
+# might remove rank configuration once the computation has no impact
+coreconfigitem(
+    b'experimental',
+    b'changelog-v2.compute-rank',
+    default=True,
+)
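# Illustrative sketch (not part of this change): the new knob would be
# read with something like
#
#     compute_rank = ui.configbool(b'experimental',
#                                  b'changelog-v2.compute-rank')
#
# and a user can disable rank computation from an hgrc:
#
#     [experimental]
#     changelog-v2.compute-rank = false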
926 coreconfigitem(
933 coreconfigitem(
927 b'experimental',
934 b'experimental',
928 b'cleanup-as-archived',
935 b'cleanup-as-archived',
929 default=False,
936 default=False,
930 )
937 )
931 coreconfigitem(
938 coreconfigitem(
932 b'experimental',
939 b'experimental',
933 b'clientcompressionengines',
940 b'clientcompressionengines',
934 default=list,
941 default=list,
935 )
942 )
936 coreconfigitem(
943 coreconfigitem(
937 b'experimental',
944 b'experimental',
938 b'copytrace',
945 b'copytrace',
939 default=b'on',
946 default=b'on',
940 )
947 )
941 coreconfigitem(
948 coreconfigitem(
942 b'experimental',
949 b'experimental',
943 b'copytrace.movecandidateslimit',
950 b'copytrace.movecandidateslimit',
944 default=100,
951 default=100,
945 )
952 )
946 coreconfigitem(
953 coreconfigitem(
947 b'experimental',
954 b'experimental',
948 b'copytrace.sourcecommitlimit',
955 b'copytrace.sourcecommitlimit',
949 default=100,
956 default=100,
950 )
957 )
951 coreconfigitem(
958 coreconfigitem(
952 b'experimental',
959 b'experimental',
953 b'copies.read-from',
960 b'copies.read-from',
954 default=b"filelog-only",
961 default=b"filelog-only",
955 )
962 )
956 coreconfigitem(
963 coreconfigitem(
957 b'experimental',
964 b'experimental',
958 b'copies.write-to',
965 b'copies.write-to',
959 default=b'filelog-only',
966 default=b'filelog-only',
960 )
967 )
961 coreconfigitem(
968 coreconfigitem(
962 b'experimental',
969 b'experimental',
963 b'crecordtest',
970 b'crecordtest',
964 default=None,
971 default=None,
965 )
972 )
966 coreconfigitem(
973 coreconfigitem(
967 b'experimental',
974 b'experimental',
968 b'directaccess',
975 b'directaccess',
969 default=False,
976 default=False,
970 )
977 )
971 coreconfigitem(
978 coreconfigitem(
972 b'experimental',
979 b'experimental',
973 b'directaccess.revnums',
980 b'directaccess.revnums',
974 default=False,
981 default=False,
975 )
982 )
976 coreconfigitem(
983 coreconfigitem(
977 b'experimental',
984 b'experimental',
978 b'editortmpinhg',
985 b'editortmpinhg',
979 default=False,
986 default=False,
980 )
987 )
981 coreconfigitem(
988 coreconfigitem(
982 b'experimental',
989 b'experimental',
983 b'evolution',
990 b'evolution',
984 default=list,
991 default=list,
985 )
992 )
986 coreconfigitem(
993 coreconfigitem(
987 b'experimental',
994 b'experimental',
988 b'evolution.allowdivergence',
995 b'evolution.allowdivergence',
989 default=False,
996 default=False,
990 alias=[(b'experimental', b'allowdivergence')],
997 alias=[(b'experimental', b'allowdivergence')],
991 )
998 )
992 coreconfigitem(
999 coreconfigitem(
993 b'experimental',
1000 b'experimental',
994 b'evolution.allowunstable',
1001 b'evolution.allowunstable',
995 default=None,
1002 default=None,
996 )
1003 )
997 coreconfigitem(
1004 coreconfigitem(
998 b'experimental',
1005 b'experimental',
999 b'evolution.createmarkers',
1006 b'evolution.createmarkers',
1000 default=None,
1007 default=None,
1001 )
1008 )
1002 coreconfigitem(
1009 coreconfigitem(
1003 b'experimental',
1010 b'experimental',
1004 b'evolution.effect-flags',
1011 b'evolution.effect-flags',
1005 default=True,
1012 default=True,
1006 alias=[(b'experimental', b'effect-flags')],
1013 alias=[(b'experimental', b'effect-flags')],
1007 )
1014 )
1008 coreconfigitem(
1015 coreconfigitem(
1009 b'experimental',
1016 b'experimental',
1010 b'evolution.exchange',
1017 b'evolution.exchange',
1011 default=None,
1018 default=None,
1012 )
1019 )
1013 coreconfigitem(
1020 coreconfigitem(
1014 b'experimental',
1021 b'experimental',
1015 b'evolution.bundle-obsmarker',
1022 b'evolution.bundle-obsmarker',
1016 default=False,
1023 default=False,
1017 )
1024 )
1018 coreconfigitem(
1025 coreconfigitem(
1019 b'experimental',
1026 b'experimental',
1020 b'evolution.bundle-obsmarker:mandatory',
1027 b'evolution.bundle-obsmarker:mandatory',
1021 default=True,
1028 default=True,
1022 )
1029 )
1023 coreconfigitem(
1030 coreconfigitem(
1024 b'experimental',
1031 b'experimental',
1025 b'log.topo',
1032 b'log.topo',
1026 default=False,
1033 default=False,
1027 )
1034 )
1028 coreconfigitem(
1035 coreconfigitem(
1029 b'experimental',
1036 b'experimental',
1030 b'evolution.report-instabilities',
1037 b'evolution.report-instabilities',
1031 default=True,
1038 default=True,
1032 )
1039 )
1033 coreconfigitem(
1040 coreconfigitem(
1034 b'experimental',
1041 b'experimental',
1035 b'evolution.track-operation',
1042 b'evolution.track-operation',
1036 default=True,
1043 default=True,
1037 )
1044 )
1038 # repo-level config to exclude a revset visibility
1045 # repo-level config to exclude a revset visibility
1039 #
1046 #
1040 # The target use case is to use `share` to expose different subset of the same
1047 # The target use case is to use `share` to expose different subset of the same
1041 # repository, especially server side. See also `server.view`.
1048 # repository, especially server side. See also `server.view`.
1042 coreconfigitem(
1049 coreconfigitem(
1043 b'experimental',
1050 b'experimental',
1044 b'extra-filter-revs',
1051 b'extra-filter-revs',
1045 default=None,
1052 default=None,
1046 )
1053 )
1047 coreconfigitem(
1054 coreconfigitem(
1048 b'experimental',
1055 b'experimental',
1049 b'maxdeltachainspan',
1056 b'maxdeltachainspan',
1050 default=-1,
1057 default=-1,
1051 )
1058 )
1052 # tracks files which were undeleted (merge might delete them but we explicitly
1059 # tracks files which were undeleted (merge might delete them but we explicitly
1053 # kept/undeleted them) and creates new filenodes for them
1060 # kept/undeleted them) and creates new filenodes for them
1054 coreconfigitem(
1061 coreconfigitem(
1055 b'experimental',
1062 b'experimental',
1056 b'merge-track-salvaged',
1063 b'merge-track-salvaged',
1057 default=False,
1064 default=False,
1058 )
1065 )
1059 coreconfigitem(
1066 coreconfigitem(
1060 b'experimental',
1067 b'experimental',
1061 b'mmapindexthreshold',
1068 b'mmapindexthreshold',
1062 default=None,
1069 default=None,
1063 )
1070 )
1064 coreconfigitem(
1071 coreconfigitem(
1065 b'experimental',
1072 b'experimental',
1066 b'narrow',
1073 b'narrow',
1067 default=False,
1074 default=False,
1068 )
1075 )
1069 coreconfigitem(
1076 coreconfigitem(
1070 b'experimental',
1077 b'experimental',
1071 b'nonnormalparanoidcheck',
1078 b'nonnormalparanoidcheck',
1072 default=False,
1079 default=False,
1073 )
1080 )
1074 coreconfigitem(
1081 coreconfigitem(
1075 b'experimental',
1082 b'experimental',
1076 b'exportableenviron',
1083 b'exportableenviron',
1077 default=list,
1084 default=list,
1078 )
1085 )
1079 coreconfigitem(
1086 coreconfigitem(
1080 b'experimental',
1087 b'experimental',
1081 b'extendedheader.index',
1088 b'extendedheader.index',
1082 default=None,
1089 default=None,
1083 )
1090 )
1084 coreconfigitem(
1091 coreconfigitem(
1085 b'experimental',
1092 b'experimental',
1086 b'extendedheader.similarity',
1093 b'extendedheader.similarity',
1087 default=False,
1094 default=False,
1088 )
1095 )
1089 coreconfigitem(
1096 coreconfigitem(
1090 b'experimental',
1097 b'experimental',
1091 b'graphshorten',
1098 b'graphshorten',
1092 default=False,
1099 default=False,
1093 )
1100 )
1094 coreconfigitem(
1101 coreconfigitem(
1095 b'experimental',
1102 b'experimental',
1096 b'graphstyle.parent',
1103 b'graphstyle.parent',
1097 default=dynamicdefault,
1104 default=dynamicdefault,
1098 )
1105 )
1099 coreconfigitem(
1106 coreconfigitem(
1100 b'experimental',
1107 b'experimental',
1101 b'graphstyle.missing',
1108 b'graphstyle.missing',
1102 default=dynamicdefault,
1109 default=dynamicdefault,
1103 )
1110 )
1104 coreconfigitem(
1111 coreconfigitem(
1105 b'experimental',
1112 b'experimental',
1106 b'graphstyle.grandparent',
1113 b'graphstyle.grandparent',
1107 default=dynamicdefault,
1114 default=dynamicdefault,
1108 )
1115 )
1109 coreconfigitem(
1116 coreconfigitem(
1110 b'experimental',
1117 b'experimental',
1111 b'hook-track-tags',
1118 b'hook-track-tags',
1112 default=False,
1119 default=False,
1113 )
1120 )
1114 coreconfigitem(
1121 coreconfigitem(
1115 b'experimental',
1122 b'experimental',
1116 b'httppostargs',
1123 b'httppostargs',
1117 default=False,
1124 default=False,
1118 )
1125 )
1119 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1126 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1120 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1127 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1121
1128
1122 coreconfigitem(
1129 coreconfigitem(
1123 b'experimental',
1130 b'experimental',
1124 b'obsmarkers-exchange-debug',
1131 b'obsmarkers-exchange-debug',
1125 default=False,
1132 default=False,
1126 )
1133 )
1127 coreconfigitem(
1134 coreconfigitem(
1128 b'experimental',
1135 b'experimental',
1129 b'remotenames',
1136 b'remotenames',
1130 default=False,
1137 default=False,
1131 )
1138 )
1132 coreconfigitem(
1139 coreconfigitem(
1133 b'experimental',
1140 b'experimental',
1134 b'removeemptydirs',
1141 b'removeemptydirs',
1135 default=True,
1142 default=True,
1136 )
1143 )
1137 coreconfigitem(
1144 coreconfigitem(
1138 b'experimental',
1145 b'experimental',
1139 b'revert.interactive.select-to-keep',
1146 b'revert.interactive.select-to-keep',
1140 default=False,
1147 default=False,
1141 )
1148 )
1142 coreconfigitem(
1149 coreconfigitem(
1143 b'experimental',
1150 b'experimental',
1144 b'revisions.prefixhexnode',
1151 b'revisions.prefixhexnode',
1145 default=False,
1152 default=False,
1146 )
1153 )
1147 # "out of experimental" todo list.
1154 # "out of experimental" todo list.
1148 #
1155 #
1149 # * include management of a persistent nodemap in the main docket
1156 # * include management of a persistent nodemap in the main docket
1150 # * enforce a "no-truncate" policy for mmap safety
1157 # * enforce a "no-truncate" policy for mmap safety
1151 # - for censoring operation
1158 # - for censoring operation
1152 # - for stripping operation
1159 # - for stripping operation
1153 # - for rollback operation
1160 # - for rollback operation
1154 # * proper streaming (race free) of the docket file
1161 # * proper streaming (race free) of the docket file
1155 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1162 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1156 # * Exchange-wise, we will also need to do something more efficient than
1163 # * Exchange-wise, we will also need to do something more efficient than
1157 # keeping references to the affected revlogs, especially memory-wise when
1164 # keeping references to the affected revlogs, especially memory-wise when
1158 # rewriting sidedata.
1165 # rewriting sidedata.
1159 # * introduce a proper solution to reduce the number of filelog related files.
1166 # * introduce a proper solution to reduce the number of filelog related files.
1160 # * use caching for reading sidedata (similar to what we do for data).
1167 # * use caching for reading sidedata (similar to what we do for data).
1161 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1168 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1162 # * Improvement to consider
1169 # * Improvement to consider
1163 # - avoid compression header in chunk using the default compression?
1170 # - avoid compression header in chunk using the default compression?
1164 # - forbid "inline" compression mode entirely?
1171 # - forbid "inline" compression mode entirely?
1165 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1172 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1166 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1173 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1167 # - keep track of chain base or size (probably not that useful anymore)
1174 # - keep track of chain base or size (probably not that useful anymore)
1168 coreconfigitem(
1175 coreconfigitem(
1169 b'experimental',
1176 b'experimental',
1170 b'revlogv2',
1177 b'revlogv2',
1171 default=None,
1178 default=None,
1172 )
1179 )
1173 coreconfigitem(
1180 coreconfigitem(
1174 b'experimental',
1181 b'experimental',
1175 b'revisions.disambiguatewithin',
1182 b'revisions.disambiguatewithin',
1176 default=None,
1183 default=None,
1177 )
1184 )
1178 coreconfigitem(
1185 coreconfigitem(
1179 b'experimental',
1186 b'experimental',
1180 b'rust.index',
1187 b'rust.index',
1181 default=False,
1188 default=False,
1182 )
1189 )
1183 coreconfigitem(
1190 coreconfigitem(
1184 b'experimental',
1191 b'experimental',
1185 b'server.filesdata.recommended-batch-size',
1192 b'server.filesdata.recommended-batch-size',
1186 default=50000,
1193 default=50000,
1187 )
1194 )
1188 coreconfigitem(
1195 coreconfigitem(
1189 b'experimental',
1196 b'experimental',
1190 b'server.manifestdata.recommended-batch-size',
1197 b'server.manifestdata.recommended-batch-size',
1191 default=100000,
1198 default=100000,
1192 )
1199 )
1193 coreconfigitem(
1200 coreconfigitem(
1194 b'experimental',
1201 b'experimental',
1195 b'server.stream-narrow-clones',
1202 b'server.stream-narrow-clones',
1196 default=False,
1203 default=False,
1197 )
1204 )
1198 coreconfigitem(
1205 coreconfigitem(
1199 b'experimental',
1206 b'experimental',
1200 b'single-head-per-branch',
1207 b'single-head-per-branch',
1201 default=False,
1208 default=False,
1202 )
1209 )
1203 coreconfigitem(
1210 coreconfigitem(
1204 b'experimental',
1211 b'experimental',
1205 b'single-head-per-branch:account-closed-heads',
1212 b'single-head-per-branch:account-closed-heads',
1206 default=False,
1213 default=False,
1207 )
1214 )
1208 coreconfigitem(
1215 coreconfigitem(
1209 b'experimental',
1216 b'experimental',
1210 b'single-head-per-branch:public-changes-only',
1217 b'single-head-per-branch:public-changes-only',
1211 default=False,
1218 default=False,
1212 )
1219 )
1213 coreconfigitem(
1220 coreconfigitem(
1214 b'experimental',
1221 b'experimental',
1215 b'sparse-read',
1222 b'sparse-read',
1216 default=False,
1223 default=False,
1217 )
1224 )
1218 coreconfigitem(
1225 coreconfigitem(
1219 b'experimental',
1226 b'experimental',
1220 b'sparse-read.density-threshold',
1227 b'sparse-read.density-threshold',
1221 default=0.50,
1228 default=0.50,
1222 )
1229 )
1223 coreconfigitem(
1230 coreconfigitem(
1224 b'experimental',
1231 b'experimental',
1225 b'sparse-read.min-gap-size',
1232 b'sparse-read.min-gap-size',
1226 default=b'65K',
1233 default=b'65K',
1227 )
1234 )
1228 coreconfigitem(
1235 coreconfigitem(
1229 b'experimental',
1236 b'experimental',
1230 b'treemanifest',
1237 b'treemanifest',
1231 default=False,
1238 default=False,
1232 )
1239 )
1233 coreconfigitem(
1240 coreconfigitem(
1234 b'experimental',
1241 b'experimental',
1235 b'update.atomic-file',
1242 b'update.atomic-file',
1236 default=False,
1243 default=False,
1237 )
1244 )
1238 coreconfigitem(
1245 coreconfigitem(
1239 b'experimental',
1246 b'experimental',
1240 b'web.full-garbage-collection-rate',
1247 b'web.full-garbage-collection-rate',
1241 default=1, # still forcing a full collection on each request
1248 default=1, # still forcing a full collection on each request
1242 )
1249 )
1243 coreconfigitem(
1250 coreconfigitem(
1244 b'experimental',
1251 b'experimental',
1245 b'worker.wdir-get-thread-safe',
1252 b'worker.wdir-get-thread-safe',
1246 default=False,
1253 default=False,
1247 )
1254 )
1248 coreconfigitem(
1255 coreconfigitem(
1249 b'experimental',
1256 b'experimental',
1250 b'worker.repository-upgrade',
1257 b'worker.repository-upgrade',
1251 default=False,
1258 default=False,
1252 )
1259 )
1253 coreconfigitem(
1260 coreconfigitem(
1254 b'experimental',
1261 b'experimental',
1255 b'xdiff',
1262 b'xdiff',
1256 default=False,
1263 default=False,
1257 )
1264 )
1258 coreconfigitem(
1265 coreconfigitem(
1259 b'extensions',
1266 b'extensions',
1260 b'[^:]*',
1267 b'[^:]*',
1261 default=None,
1268 default=None,
1262 generic=True,
1269 generic=True,
1263 )
1270 )
1264 coreconfigitem(
1271 coreconfigitem(
1265 b'extensions',
1272 b'extensions',
1266 b'[^:]*:required',
1273 b'[^:]*:required',
1267 default=False,
1274 default=False,
1268 generic=True,
1275 generic=True,
1269 )
1276 )
1270 coreconfigitem(
1277 coreconfigitem(
1271 b'extdata',
1278 b'extdata',
1272 b'.*',
1279 b'.*',
1273 default=None,
1280 default=None,
1274 generic=True,
1281 generic=True,
1275 )
1282 )
1276 coreconfigitem(
1283 coreconfigitem(
1277 b'format',
1284 b'format',
1278 b'bookmarks-in-store',
1285 b'bookmarks-in-store',
1279 default=False,
1286 default=False,
1280 )
1287 )
1281 coreconfigitem(
1288 coreconfigitem(
1282 b'format',
1289 b'format',
1283 b'chunkcachesize',
1290 b'chunkcachesize',
1284 default=None,
1291 default=None,
1285 experimental=True,
1292 experimental=True,
1286 )
1293 )
1287 coreconfigitem(
1294 coreconfigitem(
1288 # Enable this dirstate format *when creating a new repository*.
1295 # Enable this dirstate format *when creating a new repository*.
1289 # Which format to use for existing repos is controlled by .hg/requires
1296 # Which format to use for existing repos is controlled by .hg/requires
1290 b'format',
1297 b'format',
1291 b'use-dirstate-v2',
1298 b'use-dirstate-v2',
1292 default=False,
1299 default=False,
1293 experimental=True,
1300 experimental=True,
1294 alias=[(b'format', b'exp-rc-dirstate-v2')],
1301 alias=[(b'format', b'exp-rc-dirstate-v2')],
1295 )
1302 )
coreconfigitem(
    b'format',
    b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-dirstate-tracked-hint',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-dirstate-tracked-hint.version',
    default=1,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'dotencode',
    default=True,
)
coreconfigitem(
    b'format',
    b'generaldelta',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'manifestcachesize',
    default=None,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'maxchainlen',
    default=dynamicdefault,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'obsstore-version',
    default=None,
)
coreconfigitem(
    b'format',
    b'sparse-revlog',
    default=True,
)
coreconfigitem(
    b'format',
    b'revlog-compression',
    default=lambda: [b'zstd', b'zlib'],
    alias=[(b'experimental', b'format.compression')],
)
# Experimental TODOs:
#
# * Same as for revlogv2 (but for the reduction of the number of files)
# * Actually computing the rank of changesets
# * Improvement to investigate
#   - storing .hgtags fnode
#   - storing branch related identifier

coreconfigitem(
    b'format',
    b'exp-use-changelog-v2',
    default=None,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'usefncache',
    default=True,
)
coreconfigitem(
    b'format',
    b'usegeneraldelta',
    default=True,
)
coreconfigitem(
    b'format',
    b'usestore',
    default=True,
)


def _persistent_nodemap_default():
    """compute `use-persistent-nodemap` default value

    The feature is disabled unless a fast implementation is available.
    """
    from . import policy

    return policy.importrust('revlog') is not None

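# Note: callable defaults, like the function above or the `list` and lambda
# defaults used throughout this file, are resolved when the value is looked
# up, so each lookup gets a freshly computed value. This is also why mutable
# defaults are spelled `list` rather than `[]`.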
coreconfigitem(
    b'format',
    b'use-persistent-nodemap',
    default=_persistent_nodemap_default,
)
coreconfigitem(
    b'format',
    b'exp-use-copies-side-data-changeset',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-share-safe',
    default=True,
)
coreconfigitem(
    b'format',
    b'use-share-safe.automatic-upgrade-of-mismatching-repositories',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet',
    default=False,
    experimental=True,
)

# Turning this on by default would mean we are confident about the scaling of
# phases. This is not guaranteed to be the case at the time this message is
# written.
coreconfigitem(
    b'format',
    b'use-internal-phase',
    default=False,
    experimental=True,
)
# The interaction between the archived phase and obsolescence markers needs to
# be sorted out before wider usage of this is to be considered.
#
# At the time this message is written, the behavior when archiving obsolete
# changesets differs significantly from stripping. As part of stripping, we
# also remove the obsolescence markers associated with the stripped
# changesets, revealing the predecessor changesets when applicable. When
# archiving, we don't touch the obsolescence markers, keeping everything
# hidden. This can result in quite confusing situations for people who combine
# exchanging drafts with the archived phase, as some markers needed by others
# may be skipped during exchange.
coreconfigitem(
    b'format',
    b'exp-archived-phase',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'shelve',
    b'store',
    default=b'internal',
    experimental=True,
)
coreconfigitem(
    b'fsmonitor',
    b'warn_when_unused',
    default=True,
)
coreconfigitem(
    b'fsmonitor',
    b'warn_update_file_count',
    default=50000,
)
coreconfigitem(
    b'fsmonitor',
    b'warn_update_file_count_rust',
    default=400000,
)
coreconfigitem(
    b'help',
    br'hidden-command\..*',
    default=False,
    generic=True,
)
coreconfigitem(
    b'help',
    br'hidden-topic\..*',
    default=False,
    generic=True,
)
coreconfigitem(
    b'hooks',
    b'[^:]*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hooks',
    b'.*:run-with-plain',
    default=True,
    generic=True,
)
coreconfigitem(
    b'hgweb-paths',
    b'.*',
    default=list,
    generic=True,
)
coreconfigitem(
    b'hostfingerprints',
    b'.*',
    default=list,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'ciphers',
    default=None,
)
coreconfigitem(
    b'hostsecurity',
    b'minimumprotocol',
    default=dynamicdefault,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:minimumprotocol$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:ciphers$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:fingerprints$',
    default=list,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:verifycertsfile$',
    default=None,
    generic=True,
)
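# The b'.*:<sub-option>$' patterns above allow per-host overrides, e.g. an
# illustrative hgrc (hostname and fingerprint are placeholders):
#
#   [hostsecurity]
#   hg.example.com:fingerprints = sha256:...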

coreconfigitem(
    b'http_proxy',
    b'always',
    default=False,
)
coreconfigitem(
    b'http_proxy',
    b'host',
    default=None,
)
coreconfigitem(
    b'http_proxy',
    b'no',
    default=list,
)
coreconfigitem(
    b'http_proxy',
    b'passwd',
    default=None,
)
coreconfigitem(
    b'http_proxy',
    b'user',
    default=None,
)

coreconfigitem(
    b'http',
    b'timeout',
    default=None,
)

coreconfigitem(
    b'logtoprocess',
    b'commandexception',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'commandfinish',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'command',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'develwarn',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'uiblocked',
    default=None,
)
coreconfigitem(
    b'merge',
    b'checkunknown',
    default=b'abort',
)
coreconfigitem(
    b'merge',
    b'checkignored',
    default=b'abort',
)
coreconfigitem(
    b'experimental',
    b'merge.checkpathconflicts',
    default=False,
)
coreconfigitem(
    b'merge',
    b'followcopies',
    default=True,
)
coreconfigitem(
    b'merge',
    b'on-failure',
    default=b'continue',
)
coreconfigitem(
    b'merge',
    b'preferancestor',
    default=lambda: [b'*'],
    experimental=True,
)
coreconfigitem(
    b'merge',
    b'strict-capability-check',
    default=False,
)
coreconfigitem(
    b'merge',
    b'disable-partial-tools',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'partial-merge-tools',
    b'.*',
    default=None,
    generic=True,
    experimental=True,
)
coreconfigitem(
    b'partial-merge-tools',
    br'.*\.patterns',
    default=dynamicdefault,
    generic=True,
    priority=-1,
    experimental=True,
)
coreconfigitem(
    b'partial-merge-tools',
    br'.*\.executable$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
    experimental=True,
)
coreconfigitem(
    b'partial-merge-tools',
    br'.*\.order',
    default=0,
    generic=True,
    priority=-1,
    experimental=True,
)
coreconfigitem(
    b'partial-merge-tools',
    br'.*\.args',
    default=b"$local $base $other",
    generic=True,
    priority=-1,
    experimental=True,
)
coreconfigitem(
    b'partial-merge-tools',
    br'.*\.disable',
    default=False,
    generic=True,
    priority=-1,
    experimental=True,
)
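# A hypothetical hgrc sketch wiring the generic partial-merge-tool keys
# declared above (the tool name and pattern are made up for illustration):
#
#   [partial-merge-tools]
#   sort-lines.patterns = **.txt
#   sort-lines.executable = sort-lines-merge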
coreconfigitem(
    b'merge-tools',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.args$',
    default=b"$local $base $other",
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.binary$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.check$',
    default=list,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.checkchanged$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.executable$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.fixeol$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.gui$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkers$',
    default=b'basic',
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkertemplate$',
    default=dynamicdefault,  # take from command-templates.mergemarker
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.priority$',
    default=0,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.premerge$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.regappend$',
    default=b"",
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.symlink$',
    default=False,
    generic=True,
    priority=-1,
)
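# These generic items back the per-tool sub-options users set in hgrc, for
# example (flags are illustrative, not a recommendation):
#
#   [merge-tools]
#   kdiff3.args = $base $local $other -o $output
#   kdiff3.gui = True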
coreconfigitem(
    b'pager',
    b'attend-.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'pager',
    b'ignore',
    default=list,
)
coreconfigitem(
    b'pager',
    b'pager',
    default=dynamicdefault,
)
coreconfigitem(
    b'patch',
    b'eol',
    default=b'strict',
)
coreconfigitem(
    b'patch',
    b'fuzz',
    default=2,
)
coreconfigitem(
    b'paths',
    b'default',
    default=None,
)
coreconfigitem(
    b'paths',
    b'default-push',
    default=None,
)
coreconfigitem(
    b'paths',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'paths',
    b'.*:bookmarks.mode',
    default='default',
    generic=True,
)
coreconfigitem(
    b'paths',
    b'.*:multi-urls',
    default=False,
    generic=True,
)
coreconfigitem(
    b'paths',
    b'.*:pushrev',
    default=None,
    generic=True,
)
coreconfigitem(
    b'paths',
    b'.*:pushurl',
    default=None,
    generic=True,
)
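# The b'.*:<sub-option>' patterns correspond to path sub-options in hgrc,
# e.g. (URLs are placeholders):
#
#   [paths]
#   default = https://hg.example.com/repo
#   default:pushurl = ssh://hg.example.com/repo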
coreconfigitem(
    b'phases',
    b'checksubrepos',
    default=b'follow',
)
coreconfigitem(
    b'phases',
    b'new-commit',
    default=b'draft',
)
coreconfigitem(
    b'phases',
    b'publish',
    default=True,
)
coreconfigitem(
    b'profiling',
    b'enabled',
    default=False,
)
coreconfigitem(
    b'profiling',
    b'format',
    default=b'text',
)
coreconfigitem(
    b'profiling',
    b'freq',
    default=1000,
)
coreconfigitem(
    b'profiling',
    b'limit',
    default=30,
)
coreconfigitem(
    b'profiling',
    b'nested',
    default=0,
)
coreconfigitem(
    b'profiling',
    b'output',
    default=None,
)
coreconfigitem(
    b'profiling',
    b'showmax',
    default=0.999,
)
coreconfigitem(
    b'profiling',
    b'showmin',
    default=dynamicdefault,
)
coreconfigitem(
    b'profiling',
    b'showtime',
    default=True,
)
coreconfigitem(
    b'profiling',
    b'sort',
    default=b'inlinetime',
)
coreconfigitem(
    b'profiling',
    b'statformat',
    default=b'hotpath',
)
coreconfigitem(
    b'profiling',
    b'time-track',
    default=dynamicdefault,
)
coreconfigitem(
    b'profiling',
    b'type',
    default=b'stat',
)
coreconfigitem(
    b'progress',
    b'assume-tty',
    default=False,
)
coreconfigitem(
    b'progress',
    b'changedelay',
    default=1,
)
coreconfigitem(
    b'progress',
    b'clear-complete',
    default=True,
)
coreconfigitem(
    b'progress',
    b'debug',
    default=False,
)
coreconfigitem(
    b'progress',
    b'delay',
    default=3,
)
coreconfigitem(
    b'progress',
    b'disable',
    default=False,
)
coreconfigitem(
    b'progress',
    b'estimateinterval',
    default=60.0,
)
coreconfigitem(
    b'progress',
    b'format',
    default=lambda: [b'topic', b'bar', b'number', b'estimate'],
)
coreconfigitem(
    b'progress',
    b'refresh',
    default=0.1,
)
coreconfigitem(
    b'progress',
    b'width',
    default=dynamicdefault,
)
coreconfigitem(
    b'pull',
    b'confirm',
    default=False,
)
coreconfigitem(
    b'push',
    b'pushvars.server',
    default=False,
)
coreconfigitem(
    b'rewrite',
    b'backup-bundle',
    default=True,
    alias=[(b'ui', b'history-editing-backup')],
)
coreconfigitem(
    b'rewrite',
    b'update-timestamp',
    default=False,
)
coreconfigitem(
    b'rewrite',
    b'empty-successor',
    default=b'skip',
    experimental=True,
)
# experimental as long as format.use-dirstate-v2 is.
coreconfigitem(
    b'storage',
    b'dirstate-v2.slow-path',
    default=b"abort",
    experimental=True,
)
coreconfigitem(
    b'storage',
    b'new-repo-backend',
    default=b'revlogv1',
    experimental=True,
)
coreconfigitem(
    b'storage',
    b'revlog.optimize-delta-parent-choice',
    default=True,
    alias=[(b'format', b'aggressivemergedeltas')],
)
coreconfigitem(
    b'storage',
    b'revlog.delta-parent-search.candidate-group-chunk-size',
    default=10,
)
coreconfigitem(
    b'storage',
    b'revlog.issue6528.fix-incoming',
    default=True,
)
# experimental as long as rust is experimental (or a C version is implemented)
coreconfigitem(
    b'storage',
    b'revlog.persistent-nodemap.mmap',
    default=True,
)
# experimental as long as format.use-persistent-nodemap is.
coreconfigitem(
    b'storage',
    b'revlog.persistent-nodemap.slow-path',
    default=b"abort",
)

coreconfigitem(
    b'storage',
    b'revlog.reuse-external-delta',
    default=True,
)
coreconfigitem(
    b'storage',
    b'revlog.reuse-external-delta-parent',
    default=None,
)
coreconfigitem(
    b'storage',
    b'revlog.zlib.level',
    default=None,
)
coreconfigitem(
    b'storage',
    b'revlog.zstd.level',
    default=None,
)
coreconfigitem(
    b'server',
    b'bookmarks-pushkey-compat',
    default=True,
)
coreconfigitem(
    b'server',
    b'bundle1',
    default=True,
)
coreconfigitem(
    b'server',
    b'bundle1gd',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1.pull',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1gd.pull',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1.push',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1gd.push',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle2.stream',
    default=True,
    alias=[(b'experimental', b'bundle2.stream')],
)
coreconfigitem(
    b'server',
    b'compressionengines',
    default=list,
)
coreconfigitem(
    b'server',
    b'concurrent-push-mode',
    default=b'check-related',
)
coreconfigitem(
    b'server',
    b'disablefullbundle',
    default=False,
)
coreconfigitem(
    b'server',
    b'maxhttpheaderlen',
    default=1024,
)
coreconfigitem(
    b'server',
    b'pullbundle',
    default=True,
)
coreconfigitem(
    b'server',
    b'preferuncompressed',
    default=False,
)
coreconfigitem(
    b'server',
    b'streamunbundle',
    default=False,
)
coreconfigitem(
    b'server',
    b'uncompressed',
    default=True,
)
coreconfigitem(
    b'server',
    b'uncompressedallowsecret',
    default=False,
)
coreconfigitem(
    b'server',
    b'view',
    default=b'served',
)
coreconfigitem(
    b'server',
    b'validate',
    default=False,
)
coreconfigitem(
    b'server',
    b'zliblevel',
    default=-1,
)
coreconfigitem(
    b'server',
    b'zstdlevel',
    default=3,
)
coreconfigitem(
    b'share',
    b'pool',
    default=None,
)
coreconfigitem(
    b'share',
    b'poolnaming',
    default=b'identity',
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-not-safe',
    default=b'abort',
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-safe',
    default=b'abort',
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-not-safe.warn',
    default=True,
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-safe.warn',
    default=True,
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-not-safe:verbose-upgrade',
    default=True,
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-safe:verbose-upgrade',
    default=True,
)
coreconfigitem(
    b'shelve',
    b'maxbackups',
    default=10,
)
coreconfigitem(
    b'smtp',
    b'host',
    default=None,
)
coreconfigitem(
    b'smtp',
    b'local_hostname',
    default=None,
)
coreconfigitem(
    b'smtp',
    b'password',
    default=None,
)
coreconfigitem(
    b'smtp',
    b'port',
    default=dynamicdefault,
)
coreconfigitem(
    b'smtp',
    b'tls',
    default=b'none',
)
coreconfigitem(
    b'smtp',
    b'username',
    default=None,
)
coreconfigitem(
    b'sparse',
    b'missingwarning',
    default=True,
    experimental=True,
)
coreconfigitem(
    b'subrepos',
    b'allowed',
    default=dynamicdefault,  # to make backporting simpler
)
coreconfigitem(
    b'subrepos',
    b'hg:allowed',
    default=dynamicdefault,
)
coreconfigitem(
    b'subrepos',
    b'git:allowed',
    default=dynamicdefault,
)
coreconfigitem(
    b'subrepos',
    b'svn:allowed',
    default=dynamicdefault,
)
coreconfigitem(
    b'templates',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'templateconfig',
    b'.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'trusted',
    b'groups',
    default=list,
)
coreconfigitem(
    b'trusted',
    b'users',
    default=list,
)
coreconfigitem(
    b'ui',
    b'_usedassubrepo',
    default=False,
)
coreconfigitem(
    b'ui',
    b'allowemptycommit',
    default=False,
)
coreconfigitem(
    b'ui',
    b'archivemeta',
    default=True,
)
coreconfigitem(
    b'ui',
    b'askusername',
    default=False,
)
coreconfigitem(
    b'ui',
    b'available-memory',
    default=None,
)

coreconfigitem(
    b'ui',
    b'clonebundlefallback',
    default=False,
)
coreconfigitem(
    b'ui',
    b'clonebundleprefers',
    default=list,
)
coreconfigitem(
    b'ui',
    b'clonebundles',
    default=True,
)
coreconfigitem(
    b'ui',
    b'color',
    default=b'auto',
)
coreconfigitem(
    b'ui',
    b'commitsubrepos',
    default=False,
)
coreconfigitem(
    b'ui',
    b'debug',
    default=False,
)
coreconfigitem(
    b'ui',
    b'debugger',
    default=None,
)
coreconfigitem(
    b'ui',
    b'editor',
    default=dynamicdefault,
)
coreconfigitem(
    b'ui',
    b'detailed-exit-code',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'ui',
    b'fallbackencoding',
    default=None,
)
coreconfigitem(
    b'ui',
    b'forcecwd',
    default=None,
)
coreconfigitem(
    b'ui',
    b'forcemerge',
    default=None,
)
coreconfigitem(
    b'ui',
    b'formatdebug',
    default=False,
)
coreconfigitem(
    b'ui',
    b'formatjson',
    default=False,
)
coreconfigitem(
    b'ui',
    b'formatted',
    default=None,
)
coreconfigitem(
    b'ui',
    b'interactive',
    default=None,
)
coreconfigitem(
    b'ui',
    b'interface',
    default=None,
)
coreconfigitem(
    b'ui',
    b'interface.chunkselector',
    default=None,
)
coreconfigitem(
    b'ui',
    b'large-file-limit',
    default=10 * (2 ** 20),
)
coreconfigitem(
    b'ui',
    b'logblockedtimes',
    default=False,
)
coreconfigitem(
    b'ui',
    b'merge',
    default=None,
)
coreconfigitem(
    b'ui',
    b'mergemarkers',
    default=b'basic',
)
coreconfigitem(
    b'ui',
    b'message-output',
    default=b'stdio',
)
coreconfigitem(
    b'ui',
    b'nontty',
    default=False,
)
coreconfigitem(
    b'ui',
    b'origbackuppath',
    default=None,
)
coreconfigitem(
    b'ui',
    b'paginate',
    default=True,
)
coreconfigitem(
    b'ui',
    b'patch',
    default=None,
)
coreconfigitem(
    b'ui',
    b'portablefilenames',
    default=b'warn',
)
coreconfigitem(
    b'ui',
    b'promptecho',
    default=False,
)
coreconfigitem(
    b'ui',
    b'quiet',
    default=False,
)
coreconfigitem(
    b'ui',
    b'quietbookmarkmove',
    default=False,
)
coreconfigitem(
    b'ui',
    b'relative-paths',
    default=b'legacy',
)
coreconfigitem(
    b'ui',
    b'remotecmd',
    default=b'hg',
)
coreconfigitem(
    b'ui',
    b'report_untrusted',
    default=True,
)
coreconfigitem(
    b'ui',
    b'rollback',
    default=True,
)
coreconfigitem(
    b'ui',
    b'signal-safe-lock',
    default=True,
)
coreconfigitem(
    b'ui',
    b'slash',
    default=False,
)
coreconfigitem(
    b'ui',
    b'ssh',
    default=b'ssh',
)
coreconfigitem(
    b'ui',
    b'ssherrorhint',
    default=None,
)
coreconfigitem(
    b'ui',
    b'statuscopies',
    default=False,
)
coreconfigitem(
    b'ui',
    b'strict',
    default=False,
)
coreconfigitem(
    b'ui',
    b'style',
    default=b'',
)
coreconfigitem(
    b'ui',
    b'supportcontact',
    default=None,
)
coreconfigitem(
    b'ui',
    b'textwidth',
    default=78,
)
coreconfigitem(
    b'ui',
    b'timeout',
    default=b'600',
)
coreconfigitem(
    b'ui',
    b'timeout.warn',
    default=0,
)
coreconfigitem(
    b'ui',
    b'timestamp-output',
    default=False,
)
coreconfigitem(
    b'ui',
    b'traceback',
    default=False,
)
coreconfigitem(
    b'ui',
    b'tweakdefaults',
    default=False,
)
coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
coreconfigitem(
    b'ui',
    b'verbose',
    default=False,
)
coreconfigitem(
    b'verify',
    b'skipflags',
    default=0,
)
coreconfigitem(
    b'web',
    b'allowbz2',
    default=False,
)
coreconfigitem(
    b'web',
    b'allowgz',
    default=False,
)
coreconfigitem(
    b'web',
    b'allow-pull',
    alias=[(b'web', b'allowpull')],
    default=True,
)
coreconfigitem(
    b'web',
    b'allow-push',
    alias=[(b'web', b'allow_push')],
    default=list,
)
coreconfigitem(
    b'web',
    b'allowzip',
    default=False,
)
coreconfigitem(
    b'web',
    b'archivesubrepos',
    default=False,
)
coreconfigitem(
    b'web',
    b'cache',
    default=True,
)
coreconfigitem(
    b'web',
    b'comparisoncontext',
    default=5,
)
coreconfigitem(
    b'web',
    b'contact',
    default=None,
)
coreconfigitem(
    b'web',
    b'deny_push',
    default=list,
)
coreconfigitem(
    b'web',
    b'guessmime',
    default=False,
)
coreconfigitem(
    b'web',
    b'hidden',
    default=False,
)
coreconfigitem(
    b'web',
    b'labels',
    default=list,
)
coreconfigitem(
    b'web',
    b'logoimg',
    default=b'hglogo.png',
)
coreconfigitem(
    b'web',
    b'logourl',
    default=b'https://mercurial-scm.org/',
)
coreconfigitem(
    b'web',
    b'accesslog',
    default=b'-',
)
coreconfigitem(
    b'web',
    b'address',
    default=b'',
)
coreconfigitem(
    b'web',
    b'allow-archive',
    alias=[(b'web', b'allow_archive')],
    default=list,
)
coreconfigitem(
    b'web',
    b'allow_read',
    default=list,
)
coreconfigitem(
    b'web',
    b'baseurl',
    default=None,
)
coreconfigitem(
    b'web',
    b'cacerts',
    default=None,
)
coreconfigitem(
    b'web',
    b'certificate',
    default=None,
)
coreconfigitem(
    b'web',
    b'collapse',
    default=False,
)
coreconfigitem(
    b'web',
    b'csp',
    default=None,
)
coreconfigitem(
    b'web',
    b'deny_read',
    default=list,
)
coreconfigitem(
    b'web',
    b'descend',
    default=True,
)
coreconfigitem(
    b'web',
    b'description',
    default=b"",
)
coreconfigitem(
    b'web',
    b'encoding',
    default=lambda: encoding.encoding,
)
coreconfigitem(
    b'web',
    b'errorlog',
    default=b'-',
)
coreconfigitem(
    b'web',
    b'ipv6',
    default=False,
)
coreconfigitem(
    b'web',
    b'maxchanges',
    default=10,
)
coreconfigitem(
    b'web',
    b'maxfiles',
    default=10,
)
coreconfigitem(
    b'web',
    b'maxshortchanges',
    default=60,
)
coreconfigitem(
    b'web',
    b'motd',
    default=b'',
)
coreconfigitem(
    b'web',
    b'name',
    default=dynamicdefault,
)
coreconfigitem(
    b'web',
    b'port',
    default=8000,
)
coreconfigitem(
    b'web',
    b'prefix',
    default=b'',
)
coreconfigitem(
    b'web',
    b'push_ssl',
    default=True,
)
coreconfigitem(
    b'web',
    b'refreshinterval',
    default=20,
)
coreconfigitem(
    b'web',
    b'server-header',
    default=None,
)
coreconfigitem(
    b'web',
    b'static',
    default=None,
)
coreconfigitem(
    b'web',
    b'staticurl',
    default=None,
)
coreconfigitem(
    b'web',
    b'stripes',
    default=1,
)
coreconfigitem(
    b'web',
    b'style',
    default=b'paper',
)
coreconfigitem(
    b'web',
    b'templates',
    default=None,
)
coreconfigitem(
    b'web',
    b'view',
    default=b'served',
    experimental=True,
)
coreconfigitem(
    b'worker',
    b'backgroundclose',
    default=dynamicdefault,
)
# Windows defaults to a limit of 512 open files. A buffer of 128
# should give us enough headway.
coreconfigitem(
    b'worker',
    b'backgroundclosemaxqueue',
    default=384,
)
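# (512 open-file limit minus the 128-file buffer gives the 384 default above.)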
2844 coreconfigitem(
2851 coreconfigitem(
2845 b'worker',
2852 b'worker',
2846 b'backgroundcloseminfilecount',
2853 b'backgroundcloseminfilecount',
2847 default=2048,
2854 default=2048,
2848 )
2855 )
2849 coreconfigitem(
2856 coreconfigitem(
2850 b'worker',
2857 b'worker',
2851 b'backgroundclosethreadcount',
2858 b'backgroundclosethreadcount',
2852 default=4,
2859 default=4,
2853 )
2860 )
2854 coreconfigitem(
2861 coreconfigitem(
2855 b'worker',
2862 b'worker',
2856 b'enabled',
2863 b'enabled',
2857 default=True,
2864 default=True,
2858 )
2865 )
2859 coreconfigitem(
2866 coreconfigitem(
2860 b'worker',
2867 b'worker',
2861 b'numcpus',
2868 b'numcpus',
2862 default=None,
2869 default=None,
2863 )
2870 )
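
# Illustrative sketch (not part of the original file): items registered
# above are consumed through the ui config API, which falls back to the
# declared default when the user sets nothing. A minimal, hypothetical
# consumer:
#
#     if repo.ui.configbool(b'worker', b'enabled'):
#         numcpus = repo.ui.configint(b'worker', b'numcpus')  # None -> auto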

# Rebase related configuration moved to core because other extensions are
# doing strange things. For example, shelve imports the extension to reuse
# some bits without formally loading it.
coreconfigitem(
    b'commands',
    b'rebase.requiredest',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'rebaseskipobsolete',
    default=True,
)
coreconfigitem(
    b'rebase',
    b'singletransaction',
    default=False,
)
coreconfigitem(
    b'rebase',
    b'experimental.inmemory',
    default=False,
)

# This setting controls creation of a rebase_source extra field
# during rebase. When False, no such field is created. This is
# useful e.g. for incrementally converting changesets and then
# rebasing them onto an existing repo.
# WARNING: this is an advanced setting reserved for people who know
# exactly what they are doing. Misuse of this setting can easily
# result in obsmarker cycles and a vivid headache.
coreconfigitem(
    b'rebase',
    b'store-source',
    default=True,
    experimental=True,
)
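
# Illustrative sketch (assumption about the rebase extension's behaviour):
# with store-source left at True, a rebased changeset records where it came
# from, roughly:
#
#     extra[b'rebase_source'] = ctx.hex()  # node of the pre-rebase revision
#
# Setting it to False omits the field, e.g. for repeated conversion runs.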
@@ -1,3978 +1,3980 @@
# localrepo.py - read/write repository class for mercurial
# coding: utf-8
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import functools
import os
import random
import sys
import time
import weakref

from concurrent import futures
from typing import (
    Optional,
)

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
    wireprototypes,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    concurrency_checker as revlogchecker,
    constants as revlogconst,
    sidedata as sidedatamod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


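# Illustrative sketch (pattern used further down this module; the property
# name is taken as an assumption): a property declared as
#
#     @repofilecache(b'bookmarks')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
#
# is recomputed only when the stat of .hg/bookmarks changes on disk.

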
class changelogcache(storecache):
    """filecache for the changelog"""

    def __init__(self):
        super(changelogcache, self).__init__()
        _cachedfiles.add((b'00changelog.i', b''))
        _cachedfiles.add((b'00changelog.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00changelog.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00changelog.n'))
        return paths


class manifestlogcache(storecache):
    """filecache for the manifestlog"""

    def __init__(self):
        super(manifestlogcache, self).__init__()
        _cachedfiles.add((b'00manifest.i', b''))
        _cachedfiles.add((b'00manifest.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00manifest.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00manifest.n'))
        return paths


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


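# Illustrative sketch (hypothetical call site): this lets callers peek at a
# cache without forcing an expensive load:
#
#     cl, cached = isfilecached(repo, b'changelog')
#     if cached:
#         ...  # safe to inspect cl without triggering any I/O

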
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper


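# Illustrative sketch (method name and body are assumptions): decorating a
# repo method forces it to run against the unfiltered repository:
#
#     @unfilteredmethod
#     def destroyed(self):
#         ...  # always sees hidden/obsolete changesets

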
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor:
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True


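# Illustrative sketch (assumed caller): the executor is reached through the
# generic peer API, so local and remote peers look alike to callers:
#
#     with peer.commandexecutor() as e:
#         heads = e.callcommand(b'heads', {}).result()

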
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui

        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()


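# Illustrative sketch (how an extension might hook in; the feature name and
# function names are hypothetical):
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-my-feature')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)

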
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is a vfs pointing at .hg/ of the current repo (the shared one)
    requirements is a set of requirements of the current repo (the shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the requires file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress FileNotFoundError if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    read = vfs.tryread if allowmissing else vfs.read
    return set(read(b'requires').splitlines())


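# Illustrative sketch: a typical ``.hg/requires`` is plain newline-delimited
# feature names; exact contents vary by repository, e.g.:
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     store

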
def makelocalrepository(baseui, path: bytes, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except FileNotFoundError:
            pass
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
    # requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
    # if store is not present; refer to checkrequirementscompat() for that
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
    hint = _(b"see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:

        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if (
        requirementsmod.REVLOGV2_REQUIREMENT in requirements
        or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
    ):
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced a race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


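# Illustrative sketch (assumption: wrapping one of the REPO_INTERFACES
# factories, as the docstring above suggests; the wrapped name and feature
# string are hypothetical):
#
#     def wrapmakefilestorage(orig, requirements, features, **kwargs):
#         features.add(b'exp-my-feature')
#         return orig(requirements=requirements, features=features, **kwargs)
#
#     extensions.wrapfunction(localrepo, 'makefilestorage', wrapmakefilestorage)

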
def loadhgrc(
    ui,
    wdirvfs: vfsmod.vfs,
    hgvfs: vfsmod.vfs,
    requirements,
    sharedvfs: Optional[vfsmod.vfs] = None,
):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is a vfs object pointing to the source repo if the current one
    is a shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from the shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret


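# Illustrative sketch (per the docstring above; function and file names are
# hypothetical):
#
#     def wrappedloadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args):
#         ret = orig(ui, wdirvfs, hgvfs, requirements, *args)
#         ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#         return ret
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', wrappedloadhgrc)

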
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # the requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


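The net effect of the mapping above: opening a repository whose ``.hg/requires`` lists, say, ``lfs`` behaves as if the user had written the equivalent of the following in a config file, with ``autoload`` recorded as the setting's source:

[extensions]
lfs =
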
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)

    return supported


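For reference, extensions feed ``featuresetupfuncs`` by registering a setup function at load time; a minimal sketch of the pattern (the requirement name here is made up for illustration):

from mercurial import localrepo


def featuresetup(ui, supported):
    # advertise that this build knows how to open such repositories
    supported |= {b'exp-my-feature'}


def uisetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)
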
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError``
    if there exists any requirement in that set that currently loaded
    code doesn't recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


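Put differently, the branches above select the store implementation from the requirements roughly as follows:

# requirements present          -> store implementation
# store + fncache + dotencode   -> fncachestore (dotencode=True)
# store + fncache               -> fncachestore (dotencode=False)
# store only                    -> encodedstore
# none of the above (very old)  -> basicstore
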
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


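When the sidedata requirement is absent, the copy-storage choice above is driven purely by configuration; for instance, someone experimenting with changeset-centric copy tracing might set:

[experimental]
copies.write-to = changeset-only
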
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True
        cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
        options[b'changelogv2.compute-rank'] = cmp_rank

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents
    dps_cgds = ui.configint(
        b'storage',
        b'revlog.delta-parent-search.candidate-group-chunk-size',
    )
    options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
    options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')

    issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
    options[b'issue6528.fix-incoming'] = issue6528

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlogs seem to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
    if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
        slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
            msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `dirstate-v2` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-dirstate-v2` for details"
        )
        if not dirstate.HAS_FAST_DIRSTATE_V2:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
    if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
        options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options


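The ``changelogv2.compute-rank`` option is the new knob added by this changeset: on a changelog-v2 repository, rank computation can now be disabled from configuration, e.g.:

[experimental]
changelog-v2.compute-rank = no
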
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage:
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage:
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]


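Because each factory is reached through a lambda, an extension can wrap the module-level function and have the wrapper picked up here. A minimal sketch under that assumption (the subclass is hypothetical):

from mercurial import extensions, localrepo


def _makemain(orig, **kwargs):
    base = orig(**kwargs)

    class wrappedrepo(base):
        # add or override repository behaviour here
        pass

    return wrappedrepo


def uisetup(ui):
    extensions.wrapfunction(localrepo, 'makemain', _makemain)
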
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository:
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    _basesupported = {
        requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
        requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.DIRSTATE_TRACKED_HINT_V1,
        requirementsmod.DIRSTATE_V2_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed, so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot: bytes,
        wdirvfs: vfsmod.vfs,
        hgvfs: vfsmod.vfs,
        requirements,
        supportedrequirements,
        sharedpath: bytes,
        store,
        cachevfs: vfsmod.vfs,
        wcachevfs: vfsmod.vfs,
        features,
        intents=None,
    ):
1353 """Create a new local repository instance.
1355 """Create a new local repository instance.
1354
1356
1355 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1357 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1356 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1358 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1357 object.
1359 object.
1358
1360
1359 Arguments:
1361 Arguments:
1360
1362
1361 baseui
1363 baseui
1362 ``ui.ui`` instance that ``ui`` argument was based off of.
1364 ``ui.ui`` instance that ``ui`` argument was based off of.
1363
1365
1364 ui
1366 ui
1365 ``ui.ui`` instance for use by the repository.
1367 ``ui.ui`` instance for use by the repository.
1366
1368
1367 origroot
1369 origroot
1368 ``bytes`` path to working directory root of this repository.
1370 ``bytes`` path to working directory root of this repository.
1369
1371
1370 wdirvfs
1372 wdirvfs
1371 ``vfs.vfs`` rooted at the working directory.
1373 ``vfs.vfs`` rooted at the working directory.
1372
1374
1373 hgvfs
1375 hgvfs
1374 ``vfs.vfs`` rooted at .hg/
1376 ``vfs.vfs`` rooted at .hg/
1375
1377
1376 requirements
1378 requirements
1377 ``set`` of bytestrings representing repository opening requirements.
1379 ``set`` of bytestrings representing repository opening requirements.
1378
1380
1379 supportedrequirements
1381 supportedrequirements
1380 ``set`` of bytestrings representing repository requirements that we
1382 ``set`` of bytestrings representing repository requirements that we
1381 know how to open. May be a supetset of ``requirements``.
1383 know how to open. May be a supetset of ``requirements``.
1382
1384
1383 sharedpath
1385 sharedpath
1384 ``bytes`` Defining path to storage base directory. Points to a
1386 ``bytes`` Defining path to storage base directory. Points to a
1385 ``.hg/`` directory somewhere.
1387 ``.hg/`` directory somewhere.
1386
1388
1387 store
1389 store
1388 ``store.basicstore`` (or derived) instance providing access to
1390 ``store.basicstore`` (or derived) instance providing access to
1389 versioned storage.
1391 versioned storage.
1390
1392
1391 cachevfs
1393 cachevfs
1392 ``vfs.vfs`` used for cache files.
1394 ``vfs.vfs`` used for cache files.
1393
1395
1394 wcachevfs
1396 wcachevfs
1395 ``vfs.vfs`` used for cache files related to the working copy.
1397 ``vfs.vfs`` used for cache files related to the working copy.
1396
1398
1397 features
1399 features
1398 ``set`` of bytestrings defining features/capabilities of this
1400 ``set`` of bytestrings defining features/capabilities of this
1399 instance.
1401 instance.
1400
1402
1401 intents
1403 intents
1402 ``set`` of system strings indicating what this repo will be used
1404 ``set`` of system strings indicating what this repo will be used
1403 for.
1405 for.
1404 """
1406 """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are of the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

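    These wards only come into play when the corresponding devel switches are on; to exercise them during development one might set:

    [devel]
    all-warnings = yes
    check-locks = yes
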
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with a reference
    # cycle: self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

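    A short usage sketch, assuming an already-cloned repository in the current directory:

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'.')
    served = repo.filtered(b'served')  # the view served to remote peers
    assert served.filtername == b'served'
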
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are
        # dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks pointing to
        # the "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free,
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light": the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would
        # be refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still
        # happen without the lock (with a narrower window), but it would no
        # longer go undetected during the lock-time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cache stat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the
        #    data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

1723 def _refreshchangelog(self):
1725 def _refreshchangelog(self):
1724 """make sure the in memory changelog match the on-disk one"""
1726 """make sure the in memory changelog match the on-disk one"""
1725 if 'changelog' in vars(self) and self.currenttransaction() is None:
1727 if 'changelog' in vars(self) and self.currenttransaction() is None:
1726 del self.changelog
1728 del self.changelog
1727
1729
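    # A minimal illustrative sketch (not part of the API, caller code is
    # hypothetical): under the schedule above, a caller that takes the lock
    # can trust `_bookmarks` to be consistent with the changelog.
    #
    #     with repo.lock():
    #         # taking the lock invalidated the filecaches; reading
    #         # `_bookmarks` now re-runs _refreshchangelog() first, so the
    #         # bookmarks are filtered against a current changelog
    #         marks = repo._bookmarks
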
    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depends on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but that
    # can't be easily expressed with the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid a race; see issue6303
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = None
        if sparse.use_sparse(self):
            sparsematchfn = lambda: sparse.matcher(self)
        v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
        th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
        use_dirstate_v2 = v2_req in self.requirements
        use_tracked_hint = th in self.requirements

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
            use_dirstate_v2,
            use_tracked_hint=use_tracked_hint,
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

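    # Hedged usage sketch (hypothetical caller code, not part of this
    # module): intersecting a user-supplied matcher with the narrowspec;
    # `matchmod.match` is called in its (root, cwd, patterns) form.
    #
    #     user_m = matchmod.match(repo.root, b'', [b'glob:src/**'])
    #     m = repo.narrowmatch(user_m, includeexact=True)
    #     # m matches files both under src/ and inside the narrowspec,
    #     # plus any paths listed explicitly in user_m
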
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast-path access to the working copy parents;
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains the symbols we can recognize right away, without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

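    # Illustrative sketch of the quick-access mapping shape (values are
    # hypothetical): both revs and nodes key to the same (rev, node) pair,
    # and b'.' aliases the first working-copy parent.
    #
    #     {
    #         b'null': (-1, nullid),
    #         -1: (-1, nullid),
    #         nullid: (-1, nullid),
    #         42: (42, p1_node),
    #         p1_node: (42, p1_node),
    #         b'.': (42, p1_node),
    #     }
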
    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from a damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a
                    # recognizable exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

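    # Hedged usage sketch (values are hypothetical): the lookups
    # __getitem__ accepts, from fastest (quick-access dict) to slowest
    # (full changelog lookup).
    #
    #     repo[None]     # working directory context
    #     repo[b'.']     # first parent of the working directory
    #     repo[b'tip']   # tip of the (possibly filtered) changelog
    #     repo[42]       # by local revision number
    #     repo[node]     # by 20-byte binary node
    #     repo[hexnode]  # by 40-byte hex node
    #     repo[0:5]      # slice -> list of changectx, filtered revs skipped
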
    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr: bytes, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

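    # Hedged usage sketch (revsets are hypothetical): ``revs`` yields
    # integer revisions, ``set`` wraps them in changectx instances. The
    # %-escapes follow ``revsetlang.formatspec`` (e.g. %d for an int,
    # %s for bytes).
    #
    #     for rev in repo.revs(b'ancestors(%d) and not public()', 42):
    #         ...
    #     for ctx in repo.set(b'heads(branch(%s))', b'default'):
    #         ...
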
    def set(self, expr: bytes, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs: bytes, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self) -> bytes:
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tag-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

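    # Hedged usage sketch (node names are hypothetical): ``known`` answers
    # membership for a batch of nodes at once, treating filtered revisions
    # as unknown.
    #
    #     repo.known([existing_node, filtered_node, bogus_node])
    #     # -> [True, False, False]
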
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self) -> bytes:
        return self.dirstate.getcwd()

    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

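    # Hedged configuration sketch (hgrc content is hypothetical):
    # encode/decode filters are pattern -> command pairs; a command of
    # ``!`` disables a pattern, and names registered via ``adddatafilter``
    # are matched by prefix before falling back to a shell command.
    #
    #     [encode]
    #     **.txt = tr -d '\r'
    #
    #     [decode]
    #     **.txt = !
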
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename: bytes) -> bytes:
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(
        self,
        filename: bytes,
        data: bytes,
        flags: bytes,
        backgroundclose=False,
        **kwargs
    ) -> int:
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename: bytes, data: bytes) -> bytes:
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction
        # touched tags (new or changed or deleted tags). In addition, the
        # details of these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as
        # it might exist from a previous transaction even if no tags were
        # touched in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
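        #
        # An illustrative (hypothetical) tags.changes payload, where tag
        # ``v1.2`` moved and ``nightly`` was removed:
        #
        #   -R 1fd350b2b6e3a21b3c5b5d4a3f1b7c8d9e0a1b2c nightly
        #   -M 2a3b4c5d6e7f8091a2b3c4d5e6f708192a3b4c5d v1.2
        #   +M 9f8e7d6c5b4a39281706f5e4d3c2b1a098765432 v1.2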
2428 tracktags = lambda x: None
2430 tracktags = lambda x: None
2429 # experimental config: experimental.hook-track-tags
2431 # experimental config: experimental.hook-track-tags
2430 shouldtracktags = self.ui.configbool(
2432 shouldtracktags = self.ui.configbool(
2431 b'experimental', b'hook-track-tags'
2433 b'experimental', b'hook-track-tags'
2432 )
2434 )
2433 if desc != b'strip' and shouldtracktags:
2435 if desc != b'strip' and shouldtracktags:
2434 oldheads = self.changelog.headrevs()
2436 oldheads = self.changelog.headrevs()
2435
2437
2436 def tracktags(tr2):
2438 def tracktags(tr2):
2437 repo = reporef()
2439 repo = reporef()
2438 assert repo is not None # help pytype
2440 assert repo is not None # help pytype
2439 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2441 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2440 newheads = repo.changelog.headrevs()
2442 newheads = repo.changelog.headrevs()
2441 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2443 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2442 # notes: we compare lists here.
2444 # notes: we compare lists here.
2443 # As we do it only once buiding set would not be cheaper
2445 # As we do it only once buiding set would not be cheaper
2444 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2446 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2445 if changes:
2447 if changes:
2446 tr2.hookargs[b'tag_moved'] = b'1'
2448 tr2.hookargs[b'tag_moved'] = b'1'
2447 with repo.vfs(
2449 with repo.vfs(
2448 b'changes/tags.changes', b'w', atomictemp=True
2450 b'changes/tags.changes', b'w', atomictemp=True
2449 ) as changesfile:
2451 ) as changesfile:
2450 # note: we do not register the file to the transaction
2452 # note: we do not register the file to the transaction
2451 # because we needs it to still exist on the transaction
2453 # because we needs it to still exist on the transaction
2452 # is close (for txnclose hooks)
2454 # is close (for txnclose hooks)
2453 tagsmod.writediff(changesfile, changes)
2455 tagsmod.writediff(changesfile, changes)
2454
2456
2455 def validate(tr2):
2457 def validate(tr2):
2456 """will run pre-closing hooks"""
2458 """will run pre-closing hooks"""
2457 # XXX the transaction API is a bit lacking here so we take a hacky
2459 # XXX the transaction API is a bit lacking here so we take a hacky
2458 # path for now
2460 # path for now
2459 #
2461 #
2460 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2462 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2461 # dict is copied before these run. In addition we needs the data
2463 # dict is copied before these run. In addition we needs the data
2462 # available to in memory hooks too.
2464 # available to in memory hooks too.
2463 #
2465 #
2464 # Moreover, we also need to make sure this runs before txnclose
2466 # Moreover, we also need to make sure this runs before txnclose
2465 # hooks and there is no "pending" mechanism that would execute
2467 # hooks and there is no "pending" mechanism that would execute
2466 # logic only if hooks are about to run.
2468 # logic only if hooks are about to run.
2467 #
2469 #
2468 # Fixing this limitation of the transaction is also needed to track
2470 # Fixing this limitation of the transaction is also needed to track
2469 # other families of changes (bookmarks, phases, obsolescence).
2471 # other families of changes (bookmarks, phases, obsolescence).
2470 #
2472 #
2471 # This will have to be fixed before we remove the experimental
2473 # This will have to be fixed before we remove the experimental
2472 # gating.
2474 # gating.
2473 tracktags(tr2)
2475 tracktags(tr2)
2474 repo = reporef()
2476 repo = reporef()
2475 assert repo is not None # help pytype
2477 assert repo is not None # help pytype
2476
2478
2477 singleheadopt = (b'experimental', b'single-head-per-branch')
2479 singleheadopt = (b'experimental', b'single-head-per-branch')
2478 singlehead = repo.ui.configbool(*singleheadopt)
2480 singlehead = repo.ui.configbool(*singleheadopt)
2479 if singlehead:
2481 if singlehead:
2480 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2482 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2481 accountclosed = singleheadsub.get(
2483 accountclosed = singleheadsub.get(
2482 b"account-closed-heads", False
2484 b"account-closed-heads", False
2483 )
2485 )
2484 if singleheadsub.get(b"public-changes-only", False):
2486 if singleheadsub.get(b"public-changes-only", False):
2485 filtername = b"immutable"
2487 filtername = b"immutable"
2486 else:
2488 else:
2487 filtername = b"visible"
2489 filtername = b"visible"
2488 scmutil.enforcesinglehead(
2490 scmutil.enforcesinglehead(
2489 repo, tr2, desc, accountclosed, filtername
2491 repo, tr2, desc, accountclosed, filtername
2490 )
2492 )
2491 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2493 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2492 for name, (old, new) in sorted(
2494 for name, (old, new) in sorted(
2493 tr.changes[b'bookmarks'].items()
2495 tr.changes[b'bookmarks'].items()
2494 ):
2496 ):
2495 args = tr.hookargs.copy()
2497 args = tr.hookargs.copy()
2496 args.update(bookmarks.preparehookargs(name, old, new))
2498 args.update(bookmarks.preparehookargs(name, old, new))
2497 repo.hook(
2499 repo.hook(
2498 b'pretxnclose-bookmark',
2500 b'pretxnclose-bookmark',
2499 throw=True,
2501 throw=True,
2500 **pycompat.strkwargs(args)
2502 **pycompat.strkwargs(args)
2501 )
2503 )
2502 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2504 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2503 cl = repo.unfiltered().changelog
2505 cl = repo.unfiltered().changelog
2504 for revs, (old, new) in tr.changes[b'phases']:
2506 for revs, (old, new) in tr.changes[b'phases']:
2505 for rev in revs:
2507 for rev in revs:
2506 args = tr.hookargs.copy()
2508 args = tr.hookargs.copy()
2507 node = hex(cl.node(rev))
2509 node = hex(cl.node(rev))
2508 args.update(phases.preparehookargs(node, old, new))
2510 args.update(phases.preparehookargs(node, old, new))
2509 repo.hook(
2511 repo.hook(
2510 b'pretxnclose-phase',
2512 b'pretxnclose-phase',
2511 throw=True,
2513 throw=True,
2512 **pycompat.strkwargs(args)
2514 **pycompat.strkwargs(args)
2513 )
2515 )
2514
2516
2515 repo.hook(
2517 repo.hook(
2516 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2518 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2517 )
2519 )
2518
2520
        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # This should be explicitly invoked here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running.
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

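        # Transaction callbacks run in sorted category order (the leading "-"
        # trick on '-warm-cache' below relies on this). A sketch of how an
        # extension might schedule its own flush at finalization (the
        # category b'zz-myext' is a made-up example that sorts last):
        #
        #   tr.addfinalize(b'zz-myext', lambda tr: myext_cache_write())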
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            repo = reporef()
            assert repo is not None  # help pytype
            repo.hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

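    # A minimal usage sketch for the transaction machinery built above, as
    # seen from a caller already holding the store lock (this mirrors what
    # commit() below does; the description bytes are an arbitrary example):
    #
    #   with repo.lock():
    #       with repo.transaction(b'my-operation') as tr:
    #           ...  # mutate the store; journal, hooks and cache warming
    #                # are handled by the transaction object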
    def _journalfiles(self):
        first = (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
        )
        middle = []
        dirstate_data = self.dirstate.data_backup_filename(b'journal.dirstate')
        if dirstate_data is not None:
            middle.append((self.vfs, dirstate_data))
        end = (
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )
        return first + tuple(middle) + end

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

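    # recover() backs the `hg recover` command: if a process died while a
    # transaction was open, the 'journal' file is still present and is
    # replayed to restore the pre-transaction state, e.g.:
    #
    #   $ hg recover
    #   rolling back interrupted transaction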
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

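    # This backs the `hg rollback` command: the previous transaction's
    # 'undo.*' files are replayed. Illustrative session (revision numbers
    # are made up):
    #
    #   $ hg rollback
    #   repository tip rolled back to revision 41 (undo commit)
    #   working directory now based on revision 41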
    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            assert repo is not None  # help pytype
            repo.updatecaches(tr)

        return updater

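    # A sketch of how an extension could augment the cache updater built
    # above, using the standard extensions.wrapfunction mechanism (the
    # 'warm_myext_cache' helper is a made-up example):
    #
    #   def wrapped(orig, repo, newtransaction):
    #       updater = orig(repo, newtransaction)
    #
    #       def myupdater(tr):
    #           updater(tr)
    #           warm_myext_cache(repo, tr)  # hypothetical helper
    #
    #       return myupdater
    #
    #   extensions.wrapfunction(
    #       localrepo.localrepository, '_buildcacheupdater', wrapped
    #   )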
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False, caches=None):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this
        case the cache warming is done after a clone, and some of the slower
        caches might be skipped, namely the `.fnodetags` one. This argument is
        5.8 specific as we plan for a cleaner way to deal with this for 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        unfi = self.unfiltered()

        if full:
            msg = (
                "`full` argument for `repo.updatecaches` is deprecated\n"
                "(use `caches=repository.CACHE_ALL` instead)"
            )
            self.ui.deprecwarn(msg, b"5.9")
            caches = repository.CACHES_ALL
            if full == b"post-clone":
                caches = repository.CACHES_POST_CLONE
        elif caches is None:
            caches = repository.CACHES_DEFAULT

        if repository.CACHE_BRANCHMAP_SERVED in caches:
            if tr is None or tr.changes[b'origrepolen'] < len(self):
                # accessing the 'served' branchmap should refresh all the others,
                self.ui.debug(b'updating the branch cache\n')
                self.filtered(b'served').branchmap()
                self.filtered(b'served.hidden').branchmap()
                # flush all possibly delayed write.
                self._branchcaches.write_delayed(self)

        if repository.CACHE_CHANGELOG_CACHE in caches:
            self.changelog.update_caches(transaction=tr)

        if repository.CACHE_MANIFESTLOG_CACHE in caches:
            self.manifestlog.update_caches(transaction=tr)

        if repository.CACHE_REV_BRANCH in caches:
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

        if repository.CACHE_FULL_MANIFEST in caches:
            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

        if repository.CACHE_FILE_NODE_TAGS in caches:
            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())

        if repository.CACHE_TAGS_DEFAULT in caches:
            # accessing tags warms the cache
            self.tags()
        if repository.CACHE_TAGS_SERVED in caches:
            self.filtered(b'served').tags()

        if repository.CACHE_BRANCHMAP_ALL in caches:
            # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
            # so we're forcing a write to cause these caches to be warmed up
            # even if they haven't explicitly been requested yet (if they've
            # never been used by hg, they won't ever have been written, even if
            # they're a subset of another kind of cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

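    # A minimal sketch of warming every cache from an extension or a debug
    # command (CACHES_ALL is the constant referenced above; taking the lock
    # is the caller's responsibility):
    #
    #   with repo.lock():
    #       repo.updatecaches(caches=repository.CACHES_ALL)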
    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly reread the dirstate (i.e. restoring it to a previous
        known good state)."""
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

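    # A minimal sketch of deferring work until all locks are released,
    # matching how commit() below schedules its 'commit' hook (the callback
    # receives a success flag; 'notify_peers' is a made-up example):
    #
    #   def notify_peers(success):
    #       if success:
    #           ...  # safe: no repo lock is held any more
    #
    #   repo._afterlock(notify_peers)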
    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref() if self._wlockref else None
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

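    # The ordering documented above gives the canonical caller-side pattern
    # (wlock before lock, both released in reverse order by the context
    # managers), as used by commit() below:
    #
    #   with repo.wlock(), repo.lock():
    #       ...  # touch both .hg and .hg/store safely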
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msg_path = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msg_path
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b"%s --edit' to reuse it\n"
                        )
                        % msg_path
                    )
                raise

            def commithook(unused_success):
                # hack for commands that use a temporary commit (eg: histedit)
                # temporary commit got stripped before hook release
                if self.changelog.hasnode(ret):
                    self.hook(
                        b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                    )

            self._afterlock(commithook)
            return ret

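    # A minimal sketch of driving commit() programmatically (the arguments
    # are illustrative; the return value is the new changeset node, or None
    # when there was nothing to commit):
    #
    #   node = repo.commit(
    #       text=b"fix the frobnicator",
    #       user=b"Jane Doe <jane@example.com>",
    #   )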
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure to add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

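    # A minimal sketch of a post-dirstate-status callback, per the contract
    # documented on addpostdsstatus() above (the body is a made-up example):
    #
    #   def fixup(wctx, status):
    #       dirstate = wctx.repo().dirstate  # never use a cached copy
    #       ...  # e.g. mark files clean after an external comparison
    #
    #   repo.addpostdsstatus(fixup)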
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

3394 def branches(self, nodes):
3396 def branches(self, nodes):
3395 if not nodes:
3397 if not nodes:
3396 nodes = [self.changelog.tip()]
3398 nodes = [self.changelog.tip()]
3397 b = []
3399 b = []
3398 for n in nodes:
3400 for n in nodes:
3399 t = n
3401 t = n
3400 while True:
3402 while True:
3401 p = self.changelog.parents(n)
3403 p = self.changelog.parents(n)
3402 if p[1] != self.nullid or p[0] == self.nullid:
3404 if p[1] != self.nullid or p[0] == self.nullid:
3403 b.append((t, n, p[0], p[1]))
3405 b.append((t, n, p[0], p[1]))
3404 break
3406 break
3405 n = p[0]
3407 n = p[0]
3406 return b
3408 return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
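
    # Worked example for between(): first parents are walked from `top`
    # toward `bottom`, and nodes are kept at exponentially growing distances
    # 1, 2, 4, 8, ... from the top.  On a linear chain of revs 0..9 with
    # top == rev 9 and bottom == rev 0, the returned list holds the nodes of
    # revs 8, 7, 5, and 1.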

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose registered functions are called
        with a pushop (carrying repo, remote, and outgoing attributes) before
        changesets are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret
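
    # A minimal sketch of a pushkey round trip (key and value hypothetical);
    # bookmarks are one of the built-in pushkey namespaces:
    #
    #     ok = repo.pushkey(b'bookmarks', b'my-book', b'', hex(newnode))
    #
    # The `prepushkey` hook fires first and may veto the update; the
    # `pushkey` hook runs after the lock is released, via _afterlock() above.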

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

    def register_wanted_sidedata(self, category):
        if repository.REPO_FEATURE_SIDE_DATA not in self.features:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)
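
    # A minimal registration sketch (every name below is hypothetical). A
    # computer is assumed to take (repo, revlog, rev, existing_sidedata) and
    # return the new sidedata dict plus (flags_to_add, flags_to_remove):
    #
    #     def compute_extra(repo, store, rev, prev_sidedata):
    #         return {my_key: b'payload'}, (0, 0)
    #
    #     repo.register_wanted_sidedata(my_category)
    #     repo.register_sidedata_computer(
    #         revlogconst.KIND_CHANGELOG, my_category, (my_key,),
    #         compute_extra, flags=0,
    #     )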


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except FileNotFoundError:  # journal file does not yet exist
                pass

    return a
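
# A minimal sketch of how aftertrans() is typically wired up (the vfs and
# file names here are hypothetical): the returned callback is registered to
# run once a transaction finishes, renaming each journal file to its undo
# counterpart.
#
#     post_close = aftertrans([(vfs, b'journal', b'undo')])
#     post_close()  # vfs.tryunlink(b'undo'); vfs.rename(b'journal', b'undo')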


def undoname(fn: bytes) -> bytes:
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))
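
# For example, undoname(b'.hg/store/journal.phaseroots') returns
# b'.hg/store/undo.phaseroots'; only the first b'journal' is replaced.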


def instance(ui, path: bytes, create, intents=None, createopts=None):

    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    repo = repo_maker()
    repo = upgrade.may_auto_upgrade(repo, repo_maker)
    return repo


def islocal(path: bytes) -> bool:
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depend on the configuration.
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository, we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'use-internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    # experimental config: format.exp-archived-phase
    if ui.configbool(b'format', b'exp-archived-phase'):
        requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # if we are creating a share-repo¹ we have to handle requirements
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We have control on the working copy only, so "copy" the non
            # working copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version`"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements
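
# A minimal sketch of how the helpers above and below chain together during
# repository creation (assuming a ui object with stock configuration):
#
#     opts = defaultcreateopts(ui)            # fills in b'backend'
#     reqs = newreporequirements(ui, opts)    # config-driven requirement set
#     reqs -= checkrequirementscompat(ui, reqs)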


def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
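
# For example, with a creation option injected by an extension that is not
# loaded (b'frobnicate' is hypothetical):
#
#     filterknowncreateopts(ui, {b'backend': b'revlogv1', b'frobnicate': True})
#
# returns {b'frobnicate': True}, which makes createrepository() below abort.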


def createrepository(ui, path: bytes, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirements`` predefined set of requirements.
        (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """

    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

        unknownopts = filterknowncreateopts(ui, createopts)

        if not isinstance(unknownopts, dict):
            raise error.ProgrammingError(
                b'filterknowncreateopts() did not return a dict'
            )

        if unknownopts:
            raise error.Abort(
                _(
                    b'unable to create repository because of unknown '
                    b'creation option: %s'
                )
                % b', '.join(sorted(unknownopts)),
                hint=_(b'is a required extension not loaded?'),
            )

        requirements = newreporequirements(ui, createopts=createopts)
        requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write stored requirements
    # For new shared repository, we don't need to write the store
    # requirements as they are already present in store requires
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository:
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
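
# After poisoning, any attribute access on the instance fails fast; only
# close() remains callable. A sketch of the resulting behavior:
#
#     poisonrepository(repo)
#     repo.close()      # still a harmless no-op
#     repo.changelog    # raises error.ProgrammingError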
@@ -1,3358 +1,3364 b''
# revlog.py - storage back-end for mercurial
# coding: utf8
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""


import binascii
import collections
import contextlib
import io
import os
import struct
import zlib

# import stuff from node for others to import from revlog
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
    wdirrev,
)
from .i18n import _
from .pycompat import getattr
from .revlogutils.constants import (
    ALL_KINDS,
    CHANGELOGV2,
    COMP_MODE_DEFAULT,
    COMP_MODE_INLINE,
    COMP_MODE_PLAIN,
    ENTRY_RANK,
    FEATURES_BY_VERSION,
    FLAG_GENERALDELTA,
    FLAG_INLINE_DATA,
    INDEX_HEADER,
    KIND_CHANGELOG,
    KIND_FILELOG,
    RANK_UNKNOWN,
    REVLOGV0,
    REVLOGV1,
    REVLOGV1_FLAGS,
    REVLOGV2,
    REVLOGV2_FLAGS,
    REVLOG_DEFAULT_FLAGS,
    REVLOG_DEFAULT_FORMAT,
    REVLOG_DEFAULT_VERSION,
    SUPPORTED_FLAGS,
)
from .revlogutils.flagutil import (
    REVIDX_DEFAULT_FLAGS,
    REVIDX_ELLIPSIS,
    REVIDX_EXTSTORED,
    REVIDX_FLAGS_ORDER,
    REVIDX_HASCOPIESINFO,
    REVIDX_ISCENSORED,
    REVIDX_RAWTEXT_CHANGING_FLAGS,
)
from .thirdparty import attr
from . import (
    ancestor,
    dagop,
    error,
    mdiff,
    policy,
    pycompat,
    revlogutils,
    templatefilters,
    util,
)
from .interfaces import (
    repository,
    util as interfaceutil,
)
from .revlogutils import (
    deltas as deltautil,
    docket as docketutil,
    flagutil,
    nodemap as nodemaputil,
    randomaccessfile,
    revlogv0,
    rewrite,
    sidedata as sidedatautil,
)
from .utils import (
    storageutil,
    stringutil,
)

# blanked usage of all the names to prevent pyflakes warnings
# We need these names available in the module for extensions.

REVLOGV0
REVLOGV1
REVLOGV2
CHANGELOGV2
FLAG_INLINE_DATA
FLAG_GENERALDELTA
REVLOG_DEFAULT_FLAGS
REVLOG_DEFAULT_FORMAT
REVLOG_DEFAULT_VERSION
REVLOGV1_FLAGS
REVLOGV2_FLAGS
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
REVIDX_HASCOPIESINFO
REVIDX_EXTSTORED
REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
REVIDX_RAWTEXT_CHANGING_FLAGS

parsers = policy.importmod('parsers')
rustancestor = policy.importrust('ancestor')
rustdagop = policy.importrust('dagop')
rustrevlog = policy.importrust('revlog')

# Aliased for performance.
_zlibdecompress = zlib.decompress

# max size of revlog with inline data
_maxinline = 131072

# Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
    return text, False


def ellipsiswriteprocessor(rl, text):
    return text, False


def ellipsisrawprocessor(rl, text):
    return False


ellipsisprocessor = (
    ellipsisreadprocessor,
    ellipsiswriteprocessor,
    ellipsisrawprocessor,
)


def _verify_revision(rl, skipflags, state, node):
    """Verify the integrity of the given revlog ``node`` while providing a hook
    point for extensions to influence the operation."""
    if skipflags:
        state[b'skipread'].add(node)
    else:
        # Side-effect: read content and verify hash.
        rl.revision(node)


# True if a fast implementation for persistent-nodemap is available
#
# We also consider we have a "fast" implementation in "pure" python because
# people using pure don't really have performance considerations (and a
# wheelbarrow of other slowness sources)
HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
    parsers, 'BaseIndexObject'
)


@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class revlogrevisiondelta:
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    sidedata = attr.ib()
    protocol_flags = attr.ib()
    linknode = attr.ib(default=None)


@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class revlogproblem:
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)


def parse_index_v1(data, inline):
    # call the C implementation to parse the index data
    index, cache = parsers.parse_index2(data, inline)
    return index, cache


def parse_index_v2(data, inline):
    # call the C implementation to parse the index data
    index, cache = parsers.parse_index2(data, inline, format=REVLOGV2)
    return index, cache


def parse_index_cl_v2(data, inline):
    # call the C implementation to parse the index data
    index, cache = parsers.parse_index2(data, inline, format=CHANGELOGV2)
    return index, cache


if util.safehasattr(parsers, 'parse_index_devel_nodemap'):

    def parse_index_v1_nodemap(data, inline):
        index, cache = parsers.parse_index_devel_nodemap(data, inline)
        return index, cache


else:
    parse_index_v1_nodemap = None


def parse_index_v1_mixed(data, inline):
    index, cache = parse_index_v1(data, inline)
    return rustrevlog.MixedIndex(index), cache
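
# A minimal decoding sketch (the variables are hypothetical): `data` is the
# raw bytes of a revlog .i file and `inline` tells the parser whether the
# revision data is interleaved with the index entries:
#
#     index, cache = parse_index_v1(data, inline=bool(header & FLAG_INLINE_DATA))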


# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7FFFFFFF

FILE_TOO_SHORT_MSG = _(
    b'cannot read from revlog %s;'
    b' expected %d bytes from offset %d, data size is %d'
)

hexdigits = b'0123456789abcdefABCDEF'


class revlog:
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.

    If checkambig, indexfile is opened with checkambig=True at
    writing, to avoid file stat ambiguity.

    If mmaplargeindex is True, and an mmapindexthreshold is set, the
    index will be mmapped rather than read if it is larger than the
    configured threshold.

    If censorable is True, the revlog can have censored revisions.

    If `upperboundcomp` is not None, this is the expected maximal gain from
    compression for the data content.

    `concurrencychecker` is an optional function that receives 3 arguments: a
    file handle, a filename, and an expected position. It should check whether
    the current position in the file handle is valid, and log/warn/fail (by
    raising).

    See mercurial/revlogutils/constants.py for details about the content of an
    index entry.
    """

    _flagserrorclass = error.RevlogError
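
    # Worked example of the delta-run bound described in the docstring: for a
    # revision whose full text is 100 KB, the chain of deltas needed to
    # rebuild it is cut off once roughly 2 * 100 KB of chained data would
    # have to be read, so reconstructing a revision stays proportional to its
    # own size rather than to the number of revisions in the revlog.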
289
289
290 def __init__(
290 def __init__(
291 self,
291 self,
292 opener,
292 opener,
293 target,
293 target,
294 radix,
294 radix,
295 postfix=None, # only exist for `tmpcensored` now
295 postfix=None, # only exist for `tmpcensored` now
296 checkambig=False,
296 checkambig=False,
297 mmaplargeindex=False,
297 mmaplargeindex=False,
298 censorable=False,
298 censorable=False,
299 upperboundcomp=None,
299 upperboundcomp=None,
300 persistentnodemap=False,
300 persistentnodemap=False,
301 concurrencychecker=None,
301 concurrencychecker=None,
302 trypending=False,
302 trypending=False,
303 canonical_parent_order=True,
303 canonical_parent_order=True,
304 ):
304 ):
305 """
305 """
306 create a revlog object
306 create a revlog object
307
307
308 opener is a function that abstracts the file opening operation
308 opener is a function that abstracts the file opening operation
309 and can be used to implement COW semantics or the like.
309 and can be used to implement COW semantics or the like.
310
310
311 `target`: a (KIND, ID) tuple that identify the content stored in
311 `target`: a (KIND, ID) tuple that identify the content stored in
312 this revlog. It help the rest of the code to understand what the revlog
312 this revlog. It help the rest of the code to understand what the revlog
313 is about without having to resort to heuristic and index filename
313 is about without having to resort to heuristic and index filename
314 analysis. Note: that this must be reliably be set by normal code, but
314 analysis. Note: that this must be reliably be set by normal code, but
315 that test, debug, or performance measurement code might not set this to
315 that test, debug, or performance measurement code might not set this to
316 accurate value.
316 accurate value.
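
        For example (illustrative only; the ID is chosen by the calling
        code), a filelog tracking ``foo.txt`` might be created with
        ``target=(KIND_FILELOG, b'foo.txt')``, which is also what
        ``display_id`` shows for it.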
317 """
317 """
318 self.upperboundcomp = upperboundcomp
318 self.upperboundcomp = upperboundcomp
319
319
320 self.radix = radix
320 self.radix = radix
321
321
322 self._docket_file = None
322 self._docket_file = None
323 self._indexfile = None
323 self._indexfile = None
324 self._datafile = None
324 self._datafile = None
325 self._sidedatafile = None
325 self._sidedatafile = None
326 self._nodemap_file = None
326 self._nodemap_file = None
327 self.postfix = postfix
327 self.postfix = postfix
328 self._trypending = trypending
328 self._trypending = trypending
329 self.opener = opener
329 self.opener = opener
330 if persistentnodemap:
330 if persistentnodemap:
331 self._nodemap_file = nodemaputil.get_nodemap_file(self)
331 self._nodemap_file = nodemaputil.get_nodemap_file(self)
332
332
333 assert target[0] in ALL_KINDS
333 assert target[0] in ALL_KINDS
334 assert len(target) == 2
334 assert len(target) == 2
335 self.target = target
335 self.target = target
336 # When True, indexfile is opened with checkambig=True at writing, to
336 # When True, indexfile is opened with checkambig=True at writing, to
337 # avoid file stat ambiguity.
337 # avoid file stat ambiguity.
338 self._checkambig = checkambig
338 self._checkambig = checkambig
339 self._mmaplargeindex = mmaplargeindex
339 self._mmaplargeindex = mmaplargeindex
340 self._censorable = censorable
340 self._censorable = censorable
341 # 3-tuple of (node, rev, text) for a raw revision.
341 # 3-tuple of (node, rev, text) for a raw revision.
342 self._revisioncache = None
342 self._revisioncache = None
343 # Maps rev to chain base rev.
343 # Maps rev to chain base rev.
344 self._chainbasecache = util.lrucachedict(100)
344 self._chainbasecache = util.lrucachedict(100)
345 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
345 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
346 self._chunkcache = (0, b'')
346 self._chunkcache = (0, b'')
347 # How much data to read and cache into the raw revlog data cache.
347 # How much data to read and cache into the raw revlog data cache.
348 self._chunkcachesize = 65536
348 self._chunkcachesize = 65536
349 self._maxchainlen = None
349 self._maxchainlen = None
350 self._deltabothparents = True
350 self._deltabothparents = True
351 self._candidate_group_chunk_size = 0
351 self._candidate_group_chunk_size = 0
352 self._debug_delta = False
352 self._debug_delta = False
353 self.index = None
353 self.index = None
354 self._docket = None
354 self._docket = None
355 self._nodemap_docket = None
355 self._nodemap_docket = None
356 # Mapping of partial identifiers to full nodes.
356 # Mapping of partial identifiers to full nodes.
357 self._pcache = {}
357 self._pcache = {}
358 # Mapping of revision integer to full node.
358 # Mapping of revision integer to full node.
359 self._compengine = b'zlib'
359 self._compengine = b'zlib'
360 self._compengineopts = {}
360 self._compengineopts = {}
361 self._maxdeltachainspan = -1
361 self._maxdeltachainspan = -1
362 self._withsparseread = False
362 self._withsparseread = False
363 self._sparserevlog = False
363 self._sparserevlog = False
364 self.hassidedata = False
364 self.hassidedata = False
365 self._srdensitythreshold = 0.50
365 self._srdensitythreshold = 0.50
366 self._srmingapsize = 262144
366 self._srmingapsize = 262144
367
367
        # other optional features

        # might remove rank configuration once the computation has no impact
        self._compute_rank = False

        # Make copy of flag processors so each revlog instance can support
        # custom flags.
        self._flagprocessors = dict(flagutil.flagprocessors)

        # 3-tuple of file handles being used for active writing.
        self._writinghandles = None
        # prevent nesting of addgroup
        self._adding_group = None

        self._loadindex()

        self._concurrencychecker = concurrencychecker

        # parent order is supposed to be semantically irrelevant, so we
        # normally resort parents to ensure that the first parent is non-null,
        # if there is a non-null parent at all.
        # filelog abuses the parent order as a flag to mark some instances of
        # meta-encoded files, so allow it to disable this behavior.
        self.canonical_parent_order = canonical_parent_order

    def _init_opts(self):
        """process options (from above/config) to set up the associated
        default revlog mode

        These values might be affected when actually reading on-disk
        information.

        The relevant values are returned for use in _loadindex().

        * new_header:
            version header to use if we need to create a new revlog

        * mmapindexthreshold:
            minimal index size at which to start using mmap

        * force_nodemap:
            force the usage of a "development" version of the nodemap code
        """
        mmapindexthreshold = None
        opts = self.opener.options

        if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
            new_header = CHANGELOGV2
            self._compute_rank = opts.get(b'changelogv2.compute-rank', True)
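            # The default above keeps rank computation enabled unless the
            # user opts out. The user-facing knob feeding this option is
            # declared in configitems.py (an experimental
            # `changelog-v2.compute-rank` setting, assuming the name from
            # this changeset's summary).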
        elif b'revlogv2' in opts:
            new_header = REVLOGV2
        elif b'revlogv1' in opts:
            new_header = REVLOGV1 | FLAG_INLINE_DATA
            if b'generaldelta' in opts:
                new_header |= FLAG_GENERALDELTA
        elif b'revlogv0' in self.opener.options:
            new_header = REVLOGV0
        else:
            new_header = REVLOG_DEFAULT_VERSION

        if b'chunkcachesize' in opts:
            self._chunkcachesize = opts[b'chunkcachesize']
        if b'maxchainlen' in opts:
            self._maxchainlen = opts[b'maxchainlen']
        if b'deltabothparents' in opts:
            self._deltabothparents = opts[b'deltabothparents']
        dps_cgds = opts.get(b'delta-parent-search.candidate-group-chunk-size')
        if dps_cgds:
            self._candidate_group_chunk_size = dps_cgds
        self._lazydelta = bool(opts.get(b'lazydelta', True))
        self._lazydeltabase = False
        if self._lazydelta:
            self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
        if b'debug-delta' in opts:
            self._debug_delta = opts[b'debug-delta']
        if b'compengine' in opts:
            self._compengine = opts[b'compengine']
        if b'zlib.level' in opts:
            self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
        if b'zstd.level' in opts:
            self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
        if b'maxdeltachainspan' in opts:
            self._maxdeltachainspan = opts[b'maxdeltachainspan']
        if self._mmaplargeindex and b'mmapindexthreshold' in opts:
            mmapindexthreshold = opts[b'mmapindexthreshold']
        self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
        withsparseread = bool(opts.get(b'with-sparse-read', False))
        # sparse-revlog forces sparse-read
        self._withsparseread = self._sparserevlog or withsparseread
        if b'sparse-read-density-threshold' in opts:
            self._srdensitythreshold = opts[b'sparse-read-density-threshold']
        if b'sparse-read-min-gap-size' in opts:
            self._srmingapsize = opts[b'sparse-read-min-gap-size']
        if opts.get(b'enableellipsis'):
            self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor

        # revlog v0 doesn't have flag processors
        for flag, processor in opts.get(b'flagprocessors', {}).items():
            flagutil.insertflagprocessor(flag, processor, self._flagprocessors)

        if self._chunkcachesize <= 0:
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not greater than 0')
                % self._chunkcachesize
            )
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not a power of 2')
                % self._chunkcachesize
            )
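        # (The elif above relies on the classic power-of-two test: a power
        # of two has exactly one bit set, so ``x & (x - 1)`` is zero for it
        # and non-zero otherwise, e.g. 65536 & 65535 == 0 while
        # 65537 & 65536 != 0.)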
        force_nodemap = opts.get(b'devel-force-nodemap', False)
        return new_header, mmapindexthreshold, force_nodemap

    def _get_data(self, filepath, mmap_threshold, size=None):
        """return the content of a file, with or without mmap

        If the file is missing, return the empty string"""
        try:
            with self.opener(filepath) as fp:
                if mmap_threshold is not None:
                    file_size = self.opener.fstat(fp).st_size
                    if file_size >= mmap_threshold:
                        if size is not None:
                            # avoid potential mmap crash
                            size = min(file_size, size)
                        # TODO: should .close() to release resources without
                        # relying on Python GC
                        if size is None:
                            return util.buffer(util.mmapread(fp))
                        else:
                            return util.buffer(util.mmapread(fp, size))
                if size is None:
                    return fp.read()
                else:
                    return fp.read(size)
        except FileNotFoundError:
            return b''

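    # Illustrative use (this is how _loadindex() below calls it):
    #
    #   entry_data = self._get_data(entry_point, mmapindexthreshold)
    #
    # Files below the threshold are read outright; larger ones are mmapped,
    # avoiding a full copy of big indexes into memory.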
    def _loadindex(self, docket=None):

        new_header, mmapindexthreshold, force_nodemap = self._init_opts()

        if self.postfix is not None:
            entry_point = b'%s.i.%s' % (self.radix, self.postfix)
        elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
            entry_point = b'%s.i.a' % self.radix
        else:
            entry_point = b'%s.i' % self.radix

        if docket is not None:
            self._docket = docket
            self._docket_file = entry_point
        else:
            self._initempty = True
            entry_data = self._get_data(entry_point, mmapindexthreshold)
            if len(entry_data) > 0:
                header = INDEX_HEADER.unpack(entry_data[:4])[0]
                self._initempty = False
            else:
                header = new_header

        self._format_flags = header & ~0xFFFF
        self._format_version = header & 0xFFFF

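        # (The 4-byte header packs two 16-bit halves: the low half is the
        # format version, the high half carries per-version feature flags
        # such as FLAG_INLINE_DATA or FLAG_GENERALDELTA.)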
        supported_flags = SUPPORTED_FLAGS.get(self._format_version)
        if supported_flags is None:
            msg = _(b'unknown version (%d) in revlog %s')
            msg %= (self._format_version, self.display_id)
            raise error.RevlogError(msg)
        elif self._format_flags & ~supported_flags:
            msg = _(b'unknown flags (%#04x) in version %d revlog %s')
            display_flag = self._format_flags >> 16
            msg %= (display_flag, self._format_version, self.display_id)
            raise error.RevlogError(msg)

        features = FEATURES_BY_VERSION[self._format_version]
        self._inline = features[b'inline'](self._format_flags)
        self._generaldelta = features[b'generaldelta'](self._format_flags)
        self.hassidedata = features[b'sidedata']

        if not features[b'docket']:
            self._indexfile = entry_point
            index_data = entry_data
        else:
            self._docket_file = entry_point
            if self._initempty:
                self._docket = docketutil.default_docket(self, header)
            else:
                self._docket = docketutil.parse_docket(
                    self, entry_data, use_pending=self._trypending
                )

        if self._docket is not None:
            self._indexfile = self._docket.index_filepath()
            index_data = b''
            index_size = self._docket.index_end
            if index_size > 0:
                index_data = self._get_data(
                    self._indexfile, mmapindexthreshold, size=index_size
                )
                if len(index_data) < index_size:
                    msg = _(b'too few index data for %s: got %d, expected %d')
                    msg %= (self.display_id, len(index_data), index_size)
                    raise error.RevlogError(msg)

            self._inline = False
            # generaldelta implied by version 2 revlogs.
            self._generaldelta = True
            # the logic for persistent nodemap will be dealt with within the
            # main docket, so disable it for now.
            self._nodemap_file = None

        if self._docket is not None:
            self._datafile = self._docket.data_filepath()
            self._sidedatafile = self._docket.sidedata_filepath()
        elif self.postfix is None:
            self._datafile = b'%s.d' % self.radix
        else:
            self._datafile = b'%s.d.%s' % (self.radix, self.postfix)

        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid

        # sparse-revlog can't be on without general-delta (issue6056)
        if not self._generaldelta:
            self._sparserevlog = False

        self._storedeltachains = True

        devel_nodemap = (
            self._nodemap_file
            and force_nodemap
            and parse_index_v1_nodemap is not None
        )

        use_rust_index = False
        if rustrevlog is not None:
            if self._nodemap_file is not None:
                use_rust_index = True
            else:
                use_rust_index = self.opener.options.get(b'rust.index')

        self._parse_index = parse_index_v1
        if self._format_version == REVLOGV0:
            self._parse_index = revlogv0.parse_index_v0
        elif self._format_version == REVLOGV2:
            self._parse_index = parse_index_v2
        elif self._format_version == CHANGELOGV2:
            self._parse_index = parse_index_cl_v2
        elif devel_nodemap:
            self._parse_index = parse_index_v1_nodemap
        elif use_rust_index:
            self._parse_index = parse_index_v1_mixed
        try:
            d = self._parse_index(index_data, self._inline)
            index, chunkcache = d
            use_nodemap = (
                not self._inline
                and self._nodemap_file is not None
                and util.safehasattr(index, 'update_nodemap_data')
            )
            if use_nodemap:
                nodemap_data = nodemaputil.persisted_data(self)
                if nodemap_data is not None:
                    docket = nodemap_data[0]
                    if (
                        len(d[0]) > docket.tip_rev
                        and d[0][docket.tip_rev][7] == docket.tip_node
                    ):
                        # no changelog tampering
                        self._nodemap_docket = docket
                        index.update_nodemap_data(*nodemap_data)
        except (ValueError, IndexError):
            raise error.RevlogError(
                _(b"index %s is corrupted") % self.display_id
            )
        self.index = index
        self._segmentfile = randomaccessfile.randomaccessfile(
            self.opener,
            (self._indexfile if self._inline else self._datafile),
            self._chunkcachesize,
            chunkcache,
        )
        self._segmentfile_sidedata = randomaccessfile.randomaccessfile(
            self.opener,
            self._sidedatafile,
            self._chunkcachesize,
        )
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = util.lrucachedict(500)
        # revlog header -> revlog compressor
        self._decompressors = {}

    @util.propertycache
    def revlog_kind(self):
        return self.target[0]

    @util.propertycache
    def display_id(self):
        """The public-facing "ID" of the revlog that we use in messages"""
        if self.revlog_kind == KIND_FILELOG:
            # Reference the file without the "data/" prefix, so it is familiar
            # to the user.
            return self.target[1]
        else:
            return self.radix

    def _get_decompressor(self, t):
        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor(self._compengineopts)
                self._decompressors[t] = compressor
            except KeyError:
                raise error.RevlogError(
                    _(b'unknown compression type %s') % binascii.hexlify(t)
                )
        return compressor

    @util.propertycache
    def _compressor(self):
        engine = util.compengines[self._compengine]
        return engine.revlogcompressor(self._compengineopts)

    @util.propertycache
    def _decompressor(self):
        """the default decompressor"""
        if self._docket is None:
            return None
        t = self._docket.default_compression_header
        c = self._get_decompressor(t)
        return c.decompress

    def _indexfp(self):
        """file object for the revlog's index file"""
        return self.opener(self._indexfile, mode=b"r")

    def __index_write_fp(self):
        # You should not use this directly; use `_writing` instead
        try:
            f = self.opener(
                self._indexfile, mode=b"r+", checkambig=self._checkambig
            )
            if self._docket is None:
                f.seek(0, os.SEEK_END)
            else:
                f.seek(self._docket.index_end, os.SEEK_SET)
            return f
        except FileNotFoundError:
            return self.opener(
                self._indexfile, mode=b"w+", checkambig=self._checkambig
            )

    def __index_new_fp(self):
        # You should not use this unless you are upgrading from inline revlog
        return self.opener(
            self._indexfile,
            mode=b"w",
            checkambig=self._checkambig,
            atomictemp=True,
        )

    def _datafp(self, mode=b'r'):
        """file object for the revlog's data file"""
        return self.opener(self._datafile, mode=mode)

    @contextlib.contextmanager
    def _sidedatareadfp(self):
        """file object suitable for reading sidedata"""
        if self._writinghandles:
            yield self._writinghandles[2]
        else:
            with self.opener(self._sidedatafile) as fp:
                yield fp

    def tiprev(self):
        return len(self.index) - 1

    def tip(self):
        return self.node(self.tiprev())

    def __contains__(self, rev):
        return 0 <= rev < len(self)

    def __len__(self):
        return len(self.index)

    def __iter__(self):
        return iter(range(len(self)))

    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        return storageutil.iterrevs(len(self), start=start, stop=stop)

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def candelta(self, baserev, rev):
        """whether two revisions (baserev, rev) can be delta-ed or not"""
        # Disable delta if either rev requires a content-changing flag
        # processor (ex. LFS). This is because such a flag processor can
        # alter the rawtext content that the delta will be based on, and two
        # clients could have the same revlog node with different flags (i.e.
        # different rawtext contents) and the delta could be incompatible.
        if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
            self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
        ):
            return False
        return True

    def update_caches(self, transaction):
        if self._nodemap_file is not None:
            if transaction is None:
                nodemaputil.update_persistent_nodemap(self)
            else:
                nodemaputil.setup_persistent_nodemap(transaction, self)

    def clearcaches(self):
        self._revisioncache = None
        self._chainbasecache.clear()
        self._segmentfile.clear_cache()
        self._segmentfile_sidedata.clear_cache()
        self._pcache = {}
        self._nodemap_docket = None
        self.index.clearcaches()
        # The python code is the one responsible for validating the docket,
        # so we end up having to refresh it here.
        use_nodemap = (
            not self._inline
            and self._nodemap_file is not None
            and util.safehasattr(self.index, 'update_nodemap_data')
        )
        if use_nodemap:
            nodemap_data = nodemaputil.persisted_data(self)
            if nodemap_data is not None:
                self._nodemap_docket = nodemap_data[0]
                self.index.update_nodemap_data(*nodemap_data)

    def rev(self, node):
        try:
            return self.index.rev(node)
        except TypeError:
            raise
        except error.RevlogError:
            # parsers.c radix tree lookup failed
            if (
                node == self.nodeconstants.wdirid
                or node in self.nodeconstants.wdirfilenodeids
            ):
                raise error.WdirUnsupported
            raise error.LookupError(node, self.display_id, _(b'no node'))

    # Accessors for index entries.

    # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
    # are flags.
    def start(self, rev):
        return int(self.index[rev][0] >> 16)

    def sidedata_cut_off(self, rev):
        sd_cut_off = self.index[rev][8]
        if sd_cut_off != 0:
            return sd_cut_off
        # This is some annoying dance, because entries without sidedata
        # currently use 0 as their offset (instead of previous-offset +
        # previous-size).
        #
        # We should reconsider this sidedata β†’ 0 sidedata_offset policy.
        # In the meantime, we need this.
        while 0 <= rev:
            e = self.index[rev]
            if e[9] != 0:
                return e[8] + e[9]
            rev -= 1
        return 0

    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF

    def length(self, rev):
        return self.index[rev][1]

    def sidedata_length(self, rev):
        if not self.hassidedata:
            return 0
        return self.index[rev][9]

    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.rawdata(rev)
        return len(t)

    def size(self, rev):
        """length of non-raw text (processed by a "read" flag processor)"""
        # fast path: if no "read" flag processor could change the content,
        # size is rawsize. note: ELLIPSIS is known to not change the content.
        flags = self.flags(rev)
        if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
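            # (The XOR clears the ELLIPSIS bit from the known-flags mask, so
            # this fast path triggers when the only flag possibly set is
            # ELLIPSIS, which cannot change the size.)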
            return self.rawsize(rev)

        return len(self.revision(rev))

    def fast_rank(self, rev):
        """Return the rank of a revision if already known, or None otherwise.

        The rank of a revision is the size of the sub-graph it defines as a
        head. Equivalently, the rank of a revision `r` is the size of the set
        `ancestors(r)`, `r` included.

        This method returns the rank retrieved from the revlog in constant
        time. It makes no attempt at computing unknown values for versions of
        the revlog which do not persist the rank.
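
        For instance, in a purely linear history the rank of rev ``r`` is
        ``r + 1``: ``r`` itself plus its ``r`` ancestors (revs 0 through
        ``r - 1``).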
882 """
888 """
883 rank = self.index[rev][ENTRY_RANK]
889 rank = self.index[rev][ENTRY_RANK]
884 if self._format_version != CHANGELOGV2 or rank == RANK_UNKNOWN:
890 if self._format_version != CHANGELOGV2 or rank == RANK_UNKNOWN:
885 return None
891 return None
886 if rev == nullrev:
892 if rev == nullrev:
887 return 0 # convention
893 return 0 # convention
888 return rank
894 return rank
889
895
890 def chainbase(self, rev):
896 def chainbase(self, rev):
891 base = self._chainbasecache.get(rev)
897 base = self._chainbasecache.get(rev)
892 if base is not None:
898 if base is not None:
893 return base
899 return base
894
900
895 index = self.index
901 index = self.index
896 iterrev = rev
902 iterrev = rev
897 base = index[iterrev][3]
903 base = index[iterrev][3]
898 while base != iterrev:
904 while base != iterrev:
899 iterrev = base
905 iterrev = base
900 base = index[iterrev][3]
906 base = index[iterrev][3]
901
907
902 self._chainbasecache[rev] = base
908 self._chainbasecache[rev] = base
903 return base
909 return base
904
910
905 def linkrev(self, rev):
911 def linkrev(self, rev):
906 return self.index[rev][4]
912 return self.index[rev][4]
907
913
908 def parentrevs(self, rev):
914 def parentrevs(self, rev):
909 try:
915 try:
910 entry = self.index[rev]
916 entry = self.index[rev]
911 except IndexError:
917 except IndexError:
912 if rev == wdirrev:
918 if rev == wdirrev:
913 raise error.WdirUnsupported
919 raise error.WdirUnsupported
914 raise
920 raise
915
921
916 if self.canonical_parent_order and entry[5] == nullrev:
922 if self.canonical_parent_order and entry[5] == nullrev:
917 return entry[6], entry[5]
923 return entry[6], entry[5]
918 else:
924 else:
919 return entry[5], entry[6]
925 return entry[5], entry[6]
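        # (When the stored first parent is null, the pair is swapped above
        # so callers always see a non-null parent first; see the
        # canonical_parent_order notes in __init__.)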

    # fast parentrevs(rev) where rev isn't filtered
    _uncheckedparentrevs = parentrevs

    def node(self, rev):
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    # Derived from index values.

    def end(self, rev):
        return self.start(rev) + self.length(rev)

    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        # inline node() to avoid function call overhead
        if self.canonical_parent_order and d[5] == nullrev:
            return i[d[6]][7], i[d[5]][7]
        else:
            return i[d[5]][7], i[d[6]][7]

    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r

    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
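
        For example, with generaldelta and a delta chain ``5 -> 3 -> 2``
        (rev 2 storing a full snapshot), ``_deltachain(5)`` returns
        ``([2, 3, 5], False)`` while ``_deltachain(5, stoprev=3)`` returns
        ``([5], True)``.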
989 """
995 """
990 # Try C implementation.
996 # Try C implementation.
991 try:
997 try:
992 return self.index.deltachain(rev, stoprev, self._generaldelta)
998 return self.index.deltachain(rev, stoprev, self._generaldelta)
993 except AttributeError:
999 except AttributeError:
994 pass
1000 pass
995
1001
996 chain = []
1002 chain = []
997
1003
998 # Alias to prevent attribute lookup in tight loop.
1004 # Alias to prevent attribute lookup in tight loop.
999 index = self.index
1005 index = self.index
1000 generaldelta = self._generaldelta
1006 generaldelta = self._generaldelta
1001
1007
1002 iterrev = rev
1008 iterrev = rev
1003 e = index[iterrev]
1009 e = index[iterrev]
1004 while iterrev != e[3] and iterrev != stoprev:
1010 while iterrev != e[3] and iterrev != stoprev:
1005 chain.append(iterrev)
1011 chain.append(iterrev)
1006 if generaldelta:
1012 if generaldelta:
1007 iterrev = e[3]
1013 iterrev = e[3]
1008 else:
1014 else:
1009 iterrev -= 1
1015 iterrev -= 1
1010 e = index[iterrev]
1016 e = index[iterrev]
1011
1017
1012 if iterrev == stoprev:
1018 if iterrev == stoprev:
1013 stopped = True
1019 stopped = True
1014 else:
1020 else:
1015 chain.append(iterrev)
1021 chain.append(iterrev)
1016 stopped = False
1022 stopped = False
1017
1023
1018 chain.reverse()
1024 chain.reverse()
1019 return chain, stopped
1025 return chain, stopped
1020
1026
1021 def ancestors(self, revs, stoprev=0, inclusive=False):
1027 def ancestors(self, revs, stoprev=0, inclusive=False):
1022 """Generate the ancestors of 'revs' in reverse revision order.
1028 """Generate the ancestors of 'revs' in reverse revision order.
1023 Does not generate revs lower than stoprev.
1029 Does not generate revs lower than stoprev.
1024
1030
1025 See the documentation for ancestor.lazyancestors for more details."""
1031 See the documentation for ancestor.lazyancestors for more details."""
1026
1032
1027 # first, make sure start revisions aren't filtered
1033 # first, make sure start revisions aren't filtered
1028 revs = list(revs)
1034 revs = list(revs)
1029 checkrev = self.node
1035 checkrev = self.node
1030 for r in revs:
1036 for r in revs:
1031 checkrev(r)
1037 checkrev(r)
1032 # and we're sure ancestors aren't filtered as well
1038 # and we're sure ancestors aren't filtered as well
1033
1039
1034 if rustancestor is not None and self.index.rust_ext_compat:
1040 if rustancestor is not None and self.index.rust_ext_compat:
1035 lazyancestors = rustancestor.LazyAncestors
1041 lazyancestors = rustancestor.LazyAncestors
1036 arg = self.index
1042 arg = self.index
1037 else:
1043 else:
1038 lazyancestors = ancestor.lazyancestors
1044 lazyancestors = ancestor.lazyancestors
1039 arg = self._uncheckedparentrevs
1045 arg = self._uncheckedparentrevs
1040 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1046 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1041
1047
1042 def descendants(self, revs):
1048 def descendants(self, revs):
1043 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1049 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1044
1050
1045 def findcommonmissing(self, common=None, heads=None):
1051 def findcommonmissing(self, common=None, heads=None):
1046 """Return a tuple of the ancestors of common and the ancestors of heads
1052 """Return a tuple of the ancestors of common and the ancestors of heads
1047 that are not ancestors of common. In revset terminology, we return the
1053 that are not ancestors of common. In revset terminology, we return the
1048 tuple:
1054 tuple:
1049
1055
1050 ::common, (::heads) - (::common)
1056 ::common, (::heads) - (::common)
1051
1057
1052 The list is sorted by revision number, meaning it is
1058 The list is sorted by revision number, meaning it is
1053 topologically sorted.
1059 topologically sorted.
1054
1060
1055 'heads' and 'common' are both lists of node IDs. If heads is
1061 'heads' and 'common' are both lists of node IDs. If heads is
1056 not supplied, uses all of the revlog's heads. If common is not
1062 not supplied, uses all of the revlog's heads. If common is not
1057 supplied, uses nullid."""
1063 supplied, uses nullid."""
1058 if common is None:
1064 if common is None:
1059 common = [self.nullid]
1065 common = [self.nullid]
1060 if heads is None:
1066 if heads is None:
1061 heads = self.heads()
1067 heads = self.heads()
1062
1068
1063 common = [self.rev(n) for n in common]
1069 common = [self.rev(n) for n in common]
1064 heads = [self.rev(n) for n in heads]
1070 heads = [self.rev(n) for n in heads]
1065
1071
1066 # we want the ancestors, but inclusive
1072 # we want the ancestors, but inclusive
1067 class lazyset:
1073 class lazyset:
1068 def __init__(self, lazyvalues):
1074 def __init__(self, lazyvalues):
1069 self.addedvalues = set()
1075 self.addedvalues = set()
1070 self.lazyvalues = lazyvalues
1076 self.lazyvalues = lazyvalues
1071
1077
1072 def __contains__(self, value):
1078 def __contains__(self, value):
1073 return value in self.addedvalues or value in self.lazyvalues
1079 return value in self.addedvalues or value in self.lazyvalues
1074
1080
1075 def __iter__(self):
1081 def __iter__(self):
1076 added = self.addedvalues
1082 added = self.addedvalues
1077 for r in added:
1083 for r in added:
1078 yield r
1084 yield r
1079 for r in self.lazyvalues:
1085 for r in self.lazyvalues:
1080 if not r in added:
1086 if not r in added:
1081 yield r
1087 yield r
1082
1088
1083 def add(self, value):
1089 def add(self, value):
1084 self.addedvalues.add(value)
1090 self.addedvalues.add(value)
1085
1091
1086 def update(self, values):
1092 def update(self, values):
1087 self.addedvalues.update(values)
1093 self.addedvalues.update(values)
1088
1094
1089 has = lazyset(self.ancestors(common))
1095 has = lazyset(self.ancestors(common))
1090 has.add(nullrev)
1096 has.add(nullrev)
1091 has.update(common)
1097 has.update(common)
1092
1098
1093 # take all ancestors from heads that aren't in has
1099 # take all ancestors from heads that aren't in has
1094 missing = set()
1100 missing = set()
1095 visit = collections.deque(r for r in heads if r not in has)
1101 visit = collections.deque(r for r in heads if r not in has)
1096 while visit:
1102 while visit:
1097 r = visit.popleft()
1103 r = visit.popleft()
1098 if r in missing:
1104 if r in missing:
1099 continue
1105 continue
1100 else:
1106 else:
1101 missing.add(r)
1107 missing.add(r)
1102 for p in self.parentrevs(r):
1108 for p in self.parentrevs(r):
1103 if p not in has:
1109 if p not in has:
1104 visit.append(p)
1110 visit.append(p)
1105 missing = list(missing)
1111 missing = list(missing)
1106 missing.sort()
1112 missing.sort()
1107 return has, [self.node(miss) for miss in missing]
1113 return has, [self.node(miss) for miss in missing]
1108
1114
1109 def incrementalmissingrevs(self, common=None):
1115 def incrementalmissingrevs(self, common=None):
1110 """Return an object that can be used to incrementally compute the
1116 """Return an object that can be used to incrementally compute the
1111 revision numbers of the ancestors of arbitrary sets that are not
1117 revision numbers of the ancestors of arbitrary sets that are not
1112 ancestors of common. This is an ancestor.incrementalmissingancestors
1118 ancestors of common. This is an ancestor.incrementalmissingancestors
1113 object.
1119 object.
1114
1120
1115 'common' is a list of revision numbers. If common is not supplied, uses
1121 'common' is a list of revision numbers. If common is not supplied, uses
1116 nullrev.
1122 nullrev.
1117 """
1123 """
1118 if common is None:
1124 if common is None:
1119 common = [nullrev]
1125 common = [nullrev]
1120
1126
1121 if rustancestor is not None and self.index.rust_ext_compat:
1127 if rustancestor is not None and self.index.rust_ext_compat:
1122 return rustancestor.MissingAncestors(self.index, common)
1128 return rustancestor.MissingAncestors(self.index, common)
1123 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1129 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1124
1130
1125 def findmissingrevs(self, common=None, heads=None):
1131 def findmissingrevs(self, common=None, heads=None):
1126 """Return the revision numbers of the ancestors of heads that
1132 """Return the revision numbers of the ancestors of heads that
1127 are not ancestors of common.
1133 are not ancestors of common.
1128
1134
1129 More specifically, return a list of revision numbers corresponding to
1135 More specifically, return a list of revision numbers corresponding to
1130 nodes N such that every N satisfies the following constraints:
1136 nodes N such that every N satisfies the following constraints:
1131
1137
1132 1. N is an ancestor of some node in 'heads'
1138 1. N is an ancestor of some node in 'heads'
1133 2. N is not an ancestor of any node in 'common'
1139 2. N is not an ancestor of any node in 'common'
1134
1140
1135 The list is sorted by revision number, meaning it is
1141 The list is sorted by revision number, meaning it is
1136 topologically sorted.
1142 topologically sorted.
1137
1143
1138 'heads' and 'common' are both lists of revision numbers. If heads is
1144 'heads' and 'common' are both lists of revision numbers. If heads is
1139 not supplied, uses all of the revlog's heads. If common is not
1145 not supplied, uses all of the revlog's heads. If common is not
1140 supplied, uses nullid."""
1146 supplied, uses nullid."""
1141 if common is None:
1147 if common is None:
1142 common = [nullrev]
1148 common = [nullrev]
1143 if heads is None:
1149 if heads is None:
1144 heads = self.headrevs()
1150 heads = self.headrevs()
1145
1151
1146 inc = self.incrementalmissingrevs(common=common)
1152 inc = self.incrementalmissingrevs(common=common)
1147 return inc.missingancestors(heads)
1153 return inc.missingancestors(heads)
1148
1154
1149 def findmissing(self, common=None, heads=None):
1155 def findmissing(self, common=None, heads=None):
1150 """Return the ancestors of heads that are not ancestors of common.
1156 """Return the ancestors of heads that are not ancestors of common.
1151
1157
1152 More specifically, return a list of nodes N such that every N
1158 More specifically, return a list of nodes N such that every N
1153 satisfies the following constraints:
1159 satisfies the following constraints:
1154
1160
1155 1. N is an ancestor of some node in 'heads'
1161 1. N is an ancestor of some node in 'heads'
1156 2. N is not an ancestor of any node in 'common'
1162 2. N is not an ancestor of any node in 'common'
1157
1163
1158 The list is sorted by revision number, meaning it is
1164 The list is sorted by revision number, meaning it is
1159 topologically sorted.
1165 topologically sorted.
1160
1166
1161 'heads' and 'common' are both lists of node IDs. If heads is
1167 'heads' and 'common' are both lists of node IDs. If heads is
1162 not supplied, uses all of the revlog's heads. If common is not
1168 not supplied, uses all of the revlog's heads. If common is not
1163 supplied, uses nullid."""
1169 supplied, uses nullid."""
1164 if common is None:
1170 if common is None:
1165 common = [self.nullid]
1171 common = [self.nullid]
1166 if heads is None:
1172 if heads is None:
1167 heads = self.heads()
1173 heads = self.heads()
1168
1174
1169 common = [self.rev(n) for n in common]
1175 common = [self.rev(n) for n in common]
1170 heads = [self.rev(n) for n in heads]
1176 heads = [self.rev(n) for n in heads]
1171
1177
1172 inc = self.incrementalmissingrevs(common=common)
1178 inc = self.incrementalmissingrevs(common=common)
1173 return [self.node(r) for r in inc.missingancestors(heads)]
1179 return [self.node(r) for r in inc.missingancestors(heads)]
1174
1180
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses the list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [self.nullid]  # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return (
                [self.node(r) for r in self],
                [self.nullid],
                list(self.heads()),
            )
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == self.nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n)  # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update(
                            [p for p in self.parents(n) if p != self.nullid]
                        )
                    elif n in heads:  # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [self.nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents is a
                # descendant. (We seeded the descendants set with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in heads.items() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

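    # Illustrative call (hypothetical `rl` and node names). For a linear
    # history A -> B -> C -> D, the span between B and D includes both
    # endpoints, since every node is its own ancestor and descendant:
    #
    #     nodes, outroots, outheads = rl.nodesbetween(roots=[B], heads=[D])
    #     # nodes == [B, C, D]; outroots == [B]; outheads == [D]
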
    def headrevs(self, revs=None):
        if revs is None:
            try:
                return self.index.headrevs()
            except AttributeError:
                return self._headrevs()
        if rustdagop is not None and self.index.rust_ext_compat:
            return rustdagop.headrevs(self.index, revs)
        return dagop.headrevs(revs, self._uncheckedparentrevs)

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iterate over filtered revs, so nobody is a head at the start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1  # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0  # my parents are not
        return [r for r, val in enumerate(ishead) if val]

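    # The marking scheme above can be reproduced outside the index. A minimal
    # standalone sketch (assumes nothing from this module; `parents` maps each
    # rev to its two parent revs, with -1 standing in for nullrev):
    #
    #     parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, 2)}
    #     ishead = [1] * len(parents)    # every rev starts as a candidate
    #     for r, (p1, p2) in parents.items():
    #         for p in (p1, p2):
    #             if p >= 0:
    #                 ishead[p] = 0      # a rev with a child is not a head
    #     # [r for r, v in enumerate(ishead) if v] == [3]
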
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [self.nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullrev
        else:
            start = self.rev(start)

        stoprevs = {self.rev(n) for n in stop or []}

        revs = dagop.headrevssubset(
            self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
        )

        return [self.node(rev) for rev in revs]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

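    # Note that `children` has no reverse index to consult: it is a linear
    # scan over every revision after `node`. Hedged usage sketch
    # (hypothetical `rl`):
    #
    #     kids = rl.children(some_node)  # nodes whose p1 or p2 is some_node
    #     # an empty list means some_node currently has no children,
    #     # i.e. it is a head
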
    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        ancs = self._commonancestorsheads(a, b)
        return pycompat.maplist(self.node, ancs)

    def _commonancestorsheads(self, *revs):
        """calculate all the heads of the common ancestors of revs"""
        try:
            ancs = self.index.commonancestorsheads(*revs)
        except (AttributeError, OverflowError):  # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
        return ancs

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        A revision is considered an ancestor of itself."""
        a, b = self.rev(a), self.rev(b)
        return self.isancestorrev(a, b)

    def isancestorrev(self, a, b):
        """return True if revision a is an ancestor of revision b

        A revision is considered an ancestor of itself.

        The implementation of this is trivial but the use of
        reachableroots is not."""
        if a == nullrev:
            return True
        elif a == b:
            return True
        elif a > b:
            return False
        return bool(self.reachableroots(a, [b], [a], includepath=False))

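    # Why the reachableroots call answers the ancestor question: with
    # minroot=a, roots=[a] and heads=[b], the helpers walk b's ancestry
    # without descending below rev a (a rev's ancestors always have smaller
    # rev numbers, which is also why the a > b case above can return False
    # immediately). The result is non-empty exactly when a is reached, so,
    # for a hypothetical revlog `rl`:
    #
    #     rl.isancestorrev(a, b) == bool(rl.reachableroots(a, [b], [a]))
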
    def reachableroots(self, minroot, heads, roots, includepath=False):
        """return (heads(::(<roots> and <roots>::<heads>)))

        If includepath is True, return (<roots>::<heads>)."""
        try:
            return self.index.reachableroots2(
                minroot, heads, roots, includepath
            )
        except AttributeError:
            return dagop._reachablerootspure(
                self.parentrevs, minroot, roots, heads, includepath
            )

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return self.nullid

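    # A criss-cross merge can yield several equally good ancestor heads, and
    # `ancs` is an unordered collection. Taking min() over the binary node
    # values does not prefer one ancestor semantically; it only makes the
    # answer deterministic. Sketch (hypothetical revs 3 and 4):
    #
    #     # ancs == {3, 4}
    #     # min(map(self.node, ancs)) -> whichever 20-byte node compares
    #     # lower, stable across runs and implementations
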
    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == self.nodeconstants.nodelen:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node)  # quick search the index
                return node
            except error.LookupError:
                pass  # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if b"%d" % rev != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 2 * self.nodeconstants.nodelen:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (binascii.Error, error.LookupError):
                pass

    def _partialmatch(self, id):
        # we don't care about wdirfilenodeids as they should always be full
        # hashes
        maybewdir = self.nodeconstants.wdirhex.startswith(id)
        ambiguous = False
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    ambiguous = True
                else:
                    return partial
            elif maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            else:
                return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                ambiguous = True
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key is not hex
            pass
        if ambiguous:
            raise error.AmbiguousPrefixLookupError(
                id, self.display_id, _(b'ambiguous identifier')
            )

        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            # hex(node)[:...]
            l = len(id) // 2 * 2  # grab an even number of digits
            try:
                # we're dropping the last digit, so let's check that it's hex,
                # to avoid the expensive computation below if it's not
                if len(id) % 2 > 0:
                    if not (id[-1] in hexdigits):
                        return None
                prefix = bin(id[:l])
            except binascii.Error:
                pass
            else:
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [
                    n for n in nl if hex(n).startswith(id) and self.hasnode(n)
                ]
                if self.nodeconstants.nullhex.startswith(id):
                    nl.append(self.nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.display_id, _(b'ambiguous identifier')
                    )
                if maybewdir:
                    raise error.WdirUnsupported
                return None

    def lookup(self, id):
        """locate a node based on:
        - revision number or str(revision number)
        - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise error.LookupError(id, self.display_id, _(b'no match found'))

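    # Resolution order performed by `lookup` above, illustrated with
    # hypothetical inputs (`rl` is any revlog; each call yields a full binary
    # node or raises LookupError):
    #
    #     rl.lookup(0)            # int: taken as a rev number
    #     rl.lookup(b'-1')        # str(rev): negative counts back from tip
    #     rl.lookup(node_bytes)   # 20-byte binary node, matched exactly
    #     rl.lookup(b'1234abcd')  # hex prefix, must be unambiguous
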
    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""

        def isvalid(prefix):
            try:
                matchednode = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if matchednode is None:
                raise error.LookupError(node, self.display_id, _(b'no node'))
            return True

        def maybewdir(prefix):
            return all(c == b'f' for c in pycompat.iterbytestr(prefix))

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != self.nodeconstants.wdirid:
                    raise error.LookupError(
                        node, self.display_id, _(b'no node')
                    )
            except AttributeError:
                # Fall through to pure code
                pass

        if node == self.nodeconstants.wdirid:
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)

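    # Example behaviour (hypothetical values): the returned prefix is the
    # shortest one that no other node shares and that cannot be confused
    # with the all-'f' working-directory id:
    #
    #     rl.shortest(node)               # -> e.g. b'd4e1' if 4 digits do
    #     rl.shortest(node, minlength=6)  # -> never shorter than 6 digits
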
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return storageutil.hashrevisionsha1(text, p1, p2) != node

    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            start += (startrev + 1) * self.index.entry_size
            end += (endrev + 1) * self.index.entry_size
        length = end - start

        return start, self._segmentfile.read_chunk(start, length, df)

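    # Worked example of the inline adjustment above (made-up numbers): in an
    # inline revlog, index entries and data chunks interleave, so the data of
    # rev r is preceded by (r + 1) index entries. With entry_size == 64 and
    # two revisions of data lengths 10 and 7, rev 1's data starts at logical
    # offset 10 but physically at:
    #
    #     physical_start = 10 + (1 + 1) * 64  # == 138
    #     physical_end   = 17 + (1 + 1) * 64  # == 145, length is still 7
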
    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        compression_mode = self.index[rev][10]
        data = self._getsegmentforrevs(rev, rev, df=df)[1]
        if compression_mode == COMP_MODE_PLAIN:
            return data
        elif compression_mode == COMP_MODE_DEFAULT:
            return self._decompressor(data)
        elif compression_mode == COMP_MODE_INLINE:
            return self.decompress(data)
        else:
            msg = b'unknown compression mode %d'
            msg %= compression_mode
            raise error.RevlogError(msg)

    def _chunks(self, revs, df=None, targetsize=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self.index.entry_size
        buffer = util.buffer

        l = []
        ladd = l.append

        if not self._withsparseread:
            slicedchunks = (revs,)
        else:
            slicedchunks = deltautil.slicechunk(
                self, revs, targetsize=targetsize
            )

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            # self._decompressor might be None, but will not be used in that
            # case
            def_decomp = self._decompressor
            for rev in revschunk:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                comp_mode = self.index[rev][10]
                c = buffer(data, chunkstart - offset, chunklength)
                if comp_mode == COMP_MODE_PLAIN:
                    ladd(c)
                elif comp_mode == COMP_MODE_INLINE:
                    ladd(decomp(c))
                elif comp_mode == COMP_MODE_DEFAULT:
                    ladd(def_decomp(c))
                else:
                    msg = b'unknown compression mode %d'
                    msg %= comp_mode
                    raise error.RevlogError(msg)

        return l

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

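    # Consequence of the two storage layouts (illustrative): with
    # generaldelta the base recorded in the index can be any earlier
    # revision, while without it a delta is implicitly against rev - 1. For
    # a hypothetical rev 5 whose index base field is 2:
    #
    #     rl.deltaparent(5)  # -> 2 with generaldelta, 4 otherwise
    #     # a base equal to the rev itself means "full text", hence nullrev
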
    def issnapshot(self, rev):
        """tells whether rev is a snapshot"""
        if not self._sparserevlog:
            return self.deltaparent(rev) == nullrev
        elif util.safehasattr(self.index, b'issnapshot'):
            # directly assign the method to cache the testing and access
            self.issnapshot = self.index.issnapshot
            return self.issnapshot(rev)
        if rev == nullrev:
            return True
        entry = self.index[rev]
        base = entry[3]
        if base == rev:
            return True
        if base == nullrev:
            return True
        p1 = entry[5]
        while self.length(p1) == 0:
            b = self.deltaparent(p1)
            if b == p1:
                break
            p1 = b
        p2 = entry[6]
        while self.length(p2) == 0:
            b = self.deltaparent(p2)
            if b == p2:
                break
            p2 = b
        if base == p1 or base == p2:
            return False
        return self.issnapshot(base)

    def snapshotdepth(self, rev):
        """number of snapshots in the chain before this one"""
        if not self.issnapshot(rev):
            raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
        return len(self._deltachain(rev)[0]) - 1

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._chunk(rev2))

        return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))

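    # Fast path above: when rev2's stored delta is already expressed against
    # rev1, the raw chunk is returned as-is instead of materializing both
    # texts and re-diffing them. Hedged usage sketch (hypothetical `rl`):
    #
    #     delta = rl.revdiff(4, 5)  # binary mdiff delta turning 4 into 5
    #     # mdiff.patches(rl.rawdata(4), [delta]) == rl.rawdata(5)
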
    def revision(self, nodeorrev, _df=None):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        """
        return self._revisiondata(nodeorrev, _df)

    def sidedata(self, nodeorrev, _df=None):
        """a map of extra data related to the changeset but not part of the hash

        This function currently returns a dictionary. However, a more advanced
        mapping object will likely be used in the future for more
        efficient/lazy code.
        """
        # deal with <nodeorrev> argument type
        if isinstance(nodeorrev, int):
            rev = nodeorrev
        else:
            rev = self.rev(nodeorrev)
        return self._sidedata(rev)

    def _revisiondata(self, nodeorrev, _df=None, raw=False):
        # deal with <nodeorrev> argument type
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        # fast path the special `nullid` rev
        if node == self.nullid:
            return b""

        # ``rawtext`` is the text as stored inside the revlog. Might be the
        # revision or might need to be processed to retrieve the revision.
        rev, rawtext, validated = self._rawtext(node, rev, _df=_df)

        if raw and validated:
            # if we don't want to process the raw text and the raw
            # text is cached, we can exit early.
            return rawtext
        if rev is None:
            rev = self.rev(node)
        # the revlog's flags for this revision
        # (usually alter its state or content)
        flags = self.flags(rev)

        if validated and flags == REVIDX_DEFAULT_FLAGS:
            # no extra flags set, no flag processor runs, text = rawtext
            return rawtext

        if raw:
            validatehash = flagutil.processflagsraw(self, rawtext, flags)
            text = rawtext
        else:
            r = flagutil.processflagsread(self, rawtext, flags)
            text, validatehash = r
        if validatehash:
            self.checkhash(text, node, rev=rev)
        if not validated:
            self._revisioncache = (node, rev, rawtext)

        return text

    def _rawtext(self, node, rev, _df=None):
        """return the possibly unvalidated rawtext for a revision

        returns (rev, rawtext, validated)
        """

        # revision in the cache (could be useful to apply delta)
        cachedrev = None
        # An intermediate text to apply deltas to
        basetext = None

        # Check if we have the entry in cache
        # The cache entry looks like (node, rev, rawtext)
        if self._revisioncache:
            if self._revisioncache[0] == node:
                return (rev, self._revisioncache[2], True)
            cachedrev = self._revisioncache[1]

        if rev is None:
            rev = self.rev(node)

        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
        if stopped:
            basetext = self._revisioncache[2]

        # drop cache to save memory, the caller is expected to
        # update self._revisioncache after validating the text
        self._revisioncache = None

        targetsize = None
        rawsize = self.index[rev][2]
        if 0 <= rawsize:
            targetsize = 4 * rawsize

        bins = self._chunks(chain, df=_df, targetsize=targetsize)
        if basetext is None:
            basetext = bytes(bins[0])
            bins = bins[1:]

        rawtext = mdiff.patches(basetext, bins)
        del basetext  # let us have a chance to free memory early
        return (rev, rawtext, False)

    def _sidedata(self, rev):
        """Return the sidedata for a given revision number."""
        index_entry = self.index[rev]
        sidedata_offset = index_entry[8]
        sidedata_size = index_entry[9]

        if self._inline:
            sidedata_offset += self.index.entry_size * (1 + rev)
        if sidedata_size == 0:
            return {}

        if self._docket.sidedata_end < sidedata_offset + sidedata_size:
            filename = self._sidedatafile
            end = self._docket.sidedata_end
            offset = sidedata_offset
            length = sidedata_size
            m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
            raise error.RevlogError(m)

        comp_segment = self._segmentfile_sidedata.read_chunk(
            sidedata_offset, sidedata_size
        )

        comp = self.index[rev][11]
        if comp == COMP_MODE_PLAIN:
            segment = comp_segment
        elif comp == COMP_MODE_DEFAULT:
            segment = self._decompressor(comp_segment)
        elif comp == COMP_MODE_INLINE:
            segment = self.decompress(comp_segment)
        else:
            msg = b'unknown compression mode %d'
            msg %= comp
            raise error.RevlogError(msg)

        sidedata = sidedatautil.deserialize_sidedata(segment)
        return sidedata

    def rawdata(self, nodeorrev, _df=None):
        """return the uncompressed raw data of a given node or revision number.

        _df - an existing file handle to read from. (internal-only)
        """
        return self._revisiondata(nodeorrev, _df, raw=True)

    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return storageutil.hashrevisionsha1(text, p1, p2)

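    # The identity computed by storageutil.hashrevisionsha1 covers the two
    # parent nodes (bytewise-smaller parent first) followed by the text. A
    # standalone sketch of the same scheme, assuming only hashlib (an
    # illustration, not the storageutil code itself):
    #
    #     import hashlib
    #
    #     def example_nodehash(text, p1, p2):
    #         a, b = (p1, p2) if p1 < p2 else (p2, p1)
    #         return hashlib.sha1(a + b + text).digest()
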
1979 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1985 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1980 """Check node hash integrity.
1986 """Check node hash integrity.
1981
1987
1982 Available as a function so that subclasses can extend hash mismatch
1988 Available as a function so that subclasses can extend hash mismatch
1983 behaviors as needed.
1989 behaviors as needed.
1984 """
1990 """
1985 try:
1991 try:
1986 if p1 is None and p2 is None:
1992 if p1 is None and p2 is None:
1987 p1, p2 = self.parents(node)
1993 p1, p2 = self.parents(node)
1988 if node != self.hash(text, p1, p2):
1994 if node != self.hash(text, p1, p2):
1989 # Clear the revision cache on hash failure. The revision cache
1995 # Clear the revision cache on hash failure. The revision cache
1990 # only stores the raw revision and clearing the cache does have
1996 # only stores the raw revision and clearing the cache does have
1991 # the side-effect that we won't have a cache hit when the raw
1997 # the side-effect that we won't have a cache hit when the raw
1992 # revision data is accessed. But this case should be rare and
1998 # revision data is accessed. But this case should be rare and
1993 # it is extra work to teach the cache about the hash
1999 # it is extra work to teach the cache about the hash
1994 # verification state.
2000 # verification state.
1995 if self._revisioncache and self._revisioncache[0] == node:
2001 if self._revisioncache and self._revisioncache[0] == node:
1996 self._revisioncache = None
2002 self._revisioncache = None
1997
2003
1998 revornode = rev
2004 revornode = rev
1999 if revornode is None:
2005 if revornode is None:
2000 revornode = templatefilters.short(hex(node))
2006 revornode = templatefilters.short(hex(node))
2001 raise error.RevlogError(
2007 raise error.RevlogError(
2002 _(b"integrity check failed on %s:%s")
2008 _(b"integrity check failed on %s:%s")
2003 % (self.display_id, pycompat.bytestr(revornode))
2009 % (self.display_id, pycompat.bytestr(revornode))
2004 )
2010 )
2005 except error.RevlogError:
2011 except error.RevlogError:
2006 if self._censorable and storageutil.iscensoredtext(text):
2012 if self._censorable and storageutil.iscensoredtext(text):
2007 raise error.CensoredNodeError(self.display_id, node, text)
2013 raise error.CensoredNodeError(self.display_id, node, text)
2008 raise
2014 raise
2009
2015
2010 def _enforceinlinesize(self, tr):
2016 def _enforceinlinesize(self, tr):
2011 """Check if the revlog is too big for inline and convert if so.
2017 """Check if the revlog is too big for inline and convert if so.
2012
2018
        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        total_size = self.start(tiprev) + self.length(tiprev)
        if not self._inline or total_size < _maxinline:
            return

        troffset = tr.findoffset(self._indexfile)
        if troffset is None:
            raise error.RevlogError(
                _(b"%s not found in the transaction") % self._indexfile
            )
        trindex = None
        tr.add(self._datafile, 0)

        existing_handles = False
        if self._writinghandles is not None:
            existing_handles = True
            fp = self._writinghandles[0]
            fp.flush()
            fp.close()
            # We can't use the cached file handle after close(). So prevent
            # its usage.
            self._writinghandles = None
            self._segmentfile.writing_handle = None
            # No need to deal with the sidedata writing handle as it is only
            # relevant with revlog-v2, which is never inline and never
            # reaches this code.

        new_dfh = self._datafp(b'w+')
        new_dfh.truncate(0)  # drop any potentially existing data
        try:
            with self._indexfp() as read_ifh:
                for r in self:
                    new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
                    if (
                        trindex is None
                        and troffset
                        <= self.start(r) + r * self.index.entry_size
                    ):
                        trindex = r
                new_dfh.flush()

            if trindex is None:
                trindex = 0

            with self.__index_new_fp() as fp:
                self._format_flags &= ~FLAG_INLINE_DATA
                self._inline = False
                for i in self:
                    e = self.index.entry_binary(i)
                    if i == 0 and self._docket is None:
                        header = self._format_flags | self._format_version
                        header = self.index.pack_header(header)
                        e = header + e
                    fp.write(e)
                if self._docket is not None:
                    self._docket.index_end = fp.tell()

                # There is a small transactional race here. If the rename of
                # the index fails, we should remove the datafile. It is more
                # important to ensure that the data file is not truncated
                # when the index is replaced, as otherwise data is lost.
                tr.replace(self._datafile, self.start(trindex))

                # the temp file replaces the real index when we exit the
                # context manager

            tr.replace(self._indexfile, trindex * self.index.entry_size)
            nodemaputil.setup_persistent_nodemap(tr, self)
            self._segmentfile = randomaccessfile.randomaccessfile(
                self.opener,
                self._datafile,
                self._chunkcachesize,
            )

            if existing_handles:
                # switched from inline to conventional; reopen the index
                ifh = self.__index_write_fp()
                self._writinghandles = (ifh, new_dfh, None)
                self._segmentfile.writing_handle = new_dfh
                new_dfh = None
                # No need to deal with the sidedata writing handle as it is
                # only relevant with revlog-v2, which is never inline and
                # never reaches this code.
        finally:
            if new_dfh is not None:
                new_dfh.close()

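    # An "inline" revlog interleaves each revision's data with its index
    # entry in the single `.i` file; `_enforceinlinesize` above migrates it
    # to the split layout (a `.i` index plus a `.d` data file) and clears
    # the FLAG_INLINE_DATA format flag accordingly.
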
    def _nodeduplicatecallback(self, transaction, node):
        """called when trying to add a node already stored."""

    @contextlib.contextmanager
    def reading(self):
        """Context manager that keeps data and sidedata files open for reading"""
        with self._segmentfile.reading():
            with self._segmentfile_sidedata.reading():
                yield

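    # A usage sketch: batching several reads under a single pair of open
    # segment files instead of reopening them for every revision:
    #
    #     with rlog.reading():
    #         texts = [rlog.revision(r) for r in rlog]
    #
    # (`rlog` stands for any instance of this class.)
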
    @contextlib.contextmanager
    def _writing(self, transaction):
        if self._trypending:
            msg = b'try to write in a `trypending` revlog: %s'
            msg %= self.display_id
            raise error.ProgrammingError(msg)
        if self._writinghandles is not None:
            yield
        else:
            ifh = dfh = sdfh = None
            try:
                r = len(self)
                # opening the data file.
                dsize = 0
                if r:
                    dsize = self.end(r - 1)
                dfh = None
                if not self._inline:
                    try:
                        dfh = self._datafp(b"r+")
                        if self._docket is None:
                            dfh.seek(0, os.SEEK_END)
                        else:
                            dfh.seek(self._docket.data_end, os.SEEK_SET)
                    except FileNotFoundError:
                        dfh = self._datafp(b"w+")
                    transaction.add(self._datafile, dsize)
                if self._sidedatafile is not None:
                    # revlog-v2 does not inline, help Pytype
                    assert dfh is not None
                    try:
                        sdfh = self.opener(self._sidedatafile, mode=b"r+")
                        sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)
                    except FileNotFoundError:
                        sdfh = self.opener(self._sidedatafile, mode=b"w+")
                    transaction.add(
                        self._sidedatafile, self._docket.sidedata_end
                    )

                # opening the index file.
                isize = r * self.index.entry_size
                ifh = self.__index_write_fp()
                if self._inline:
                    transaction.add(self._indexfile, dsize + isize)
                else:
                    transaction.add(self._indexfile, isize)
                # exposing all file handles for writing.
                self._writinghandles = (ifh, dfh, sdfh)
                self._segmentfile.writing_handle = ifh if self._inline else dfh
                self._segmentfile_sidedata.writing_handle = sdfh
                yield
                if self._docket is not None:
                    self._write_docket(transaction)
            finally:
                self._writinghandles = None
                self._segmentfile.writing_handle = None
                self._segmentfile_sidedata.writing_handle = None
                if dfh is not None:
                    dfh.close()
                if sdfh is not None:
                    sdfh.close()
                # closing the index file last to avoid exposing referents to
                # potentially unflushed data content.
                if ifh is not None:
                    ifh.close()

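    # Note that `_writing` is re-entrant: when `self._writinghandles` is
    # already set it simply yields, so a caller such as `addgroup` below can
    # wrap many `_addrevision` calls in one open/position/close cycle.
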
    def _write_docket(self, transaction):
        """write the current docket on disk

        Exists as a separate method to help the changelog implement its
        transaction logic.

        We could also imagine using the same transaction logic for all
        revlogs, since dockets are cheap."""
        self._docket.write(transaction)

    def addrevision(
        self,
        text,
        transaction,
        link,
        p1,
        p2,
        cachedelta=None,
        node=None,
        flags=REVIDX_DEFAULT_FLAGS,
        deltacomputer=None,
        sidedata=None,
    ):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
            computed by default as hash(text, p1, p2), however subclasses
            might use a different hashing method (and override checkhash()
            in that case)
        flags - the known flags to set on the revision
        deltacomputer - an optional deltacomputer instance shared between
            multiple calls
        """
        if link == nullrev:
            raise error.RevlogError(
                _(b"attempted to add linkrev -1 to %s") % self.display_id
            )

        if sidedata is None:
            sidedata = {}
        elif sidedata and not self.hassidedata:
            raise error.ProgrammingError(
                _(b"trying to add sidedata to a revlog that does not support it")
            )

        if flags:
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = flagutil.processflagswrite(self, text, flags)

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise error.RevlogError(
                _(
                    b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
                )
                % (self.display_id, len(rawtext))
            )

        node = node or self.hash(rawtext, p1, p2)
        rev = self.index.get_rev(node)
        if rev is not None:
            return rev

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(
            rawtext,
            transaction,
            link,
            p1,
            p2,
            node,
            flags,
            cachedelta=cachedelta,
            deltacomputer=deltacomputer,
            sidedata=sidedata,
        )

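    # A minimal usage sketch, assuming `tr` is an open transaction and the
    # parents `p1`/`p2` are already stored in this revlog:
    #
    #     rev = rlog.addrevision(b'new file content', tr, linkrev, p1, p2)
    #
    # The return value is the new revision number, or the existing one when
    # the node was already present (see the `get_rev` early return above).
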
    def addrawrevision(
        self,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        deltacomputer=None,
        sidedata=None,
    ):
        """add a raw revision with known flags, node and parents

        useful when reusing a revision not stored in this revlog (e.g.
        received over the wire, or read from an external bundle).
        """
        with self._writing(transaction):
            return self._addrevision(
                node,
                rawtext,
                transaction,
                link,
                p1,
                p2,
                flags,
                cachedelta,
                deltacomputer=deltacomputer,
                sidedata=sidedata,
            )

    def compress(self, data):
        """Generate a possibly-compressed representation of data."""
        if not data:
            return b'', data

        compressed = self._compressor.compress(data)

        if compressed:
            # The revlog compressor added the header in the returned data.
            return b'', compressed

        if data[0:1] == b'\0':
            return b'', data
        return b'u', data

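    # The one-byte header returned above disambiguates storage:
    #
    #   b''  - the chunk is self-describing (compressor output, or raw data
    #          that already starts with b'\0')
    #   b'u' - the chunk is stored uncompressed and the b'u' prefix must be
    #          stripped on read (see `decompress` below)
    #
    # For example, a short incompressible payload would most likely
    # round-trip as:
    #
    #     h, chunk = rlog.compress(b'hello')   # (b'u', b'hello')
    #     assert rlog.decompress(h + chunk) == b'hello'
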
    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely be at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for the "decompress compressed
        # data when encoded with common and officially supported compression
        # engines" case over "raw data" and "data encoded by less common or
        # non-official compression engines." That is why we have the inline
        # lookup first followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        if t == b'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise error.RevlogError(
                    _(b'revlog decompress error: %s')
                    % stringutil.forcebytestr(e)
                )
        # '\0' is more common than 'u' so it goes first.
        elif t == b'\0':
            return data
        elif t == b'u':
            return util.buffer(data, 1)

        compressor = self._get_decompressor(t)

        return compressor.decompress(data)

    def _addrevision(
        self,
        node,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        flags,
        cachedelta,
        alwayscache=False,
        deltacomputer=None,
        sidedata=None,
    ):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.

        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

        if "deltacomputer" is not provided or None, a defaultdeltacomputer will
        be used.

        invariants:
        - rawtext is optional (can be None); if not set, cachedelta must be
          set. if both are set, they must correspond to each other.
        """
        if node == self.nullid:
            raise error.RevlogError(
                _(b"%s: attempt to add null revision") % self.display_id
            )
        if (
            node == self.nodeconstants.wdirid
            or node in self.nodeconstants.wdirfilenodeids
        ):
            raise error.RevlogError(
                _(b"%s: attempt to add wdir revision") % self.display_id
            )
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)

        if self._inline:
            fh = self._writinghandles[0]
        else:
            fh = self._writinghandles[1]

        btext = [rawtext]

        curr = len(self)
        prev = curr - 1

        offset = self._get_data_offset(prev)

        if self._concurrencychecker:
            ifh, dfh, sdfh = self._writinghandles
            # XXX no checking for the sidedata file
            if self._inline:
                # offset is "as if" it were in the .d file, so we need to add
                # on the size of the entry metadata.
                self._concurrencychecker(
                    ifh, self._indexfile, offset + curr * self.index.entry_size
                )
            else:
                # Entries in the .i are a consistent size.
                self._concurrencychecker(
                    ifh, self._indexfile, curr * self.index.entry_size
                )
                self._concurrencychecker(dfh, self._datafile, offset)

        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if rawtext is None:
            # need rawtext size, before changed by flag processors, which is
            # the non-raw size. use revlog explicitly to avoid filelog's extra
            # logic that might remove metadata size.
            textlen = mdiff.patchedsize(
                revlog.size(self, cachedelta[0]), cachedelta[1]
            )
        else:
            textlen = len(rawtext)

        if deltacomputer is None:
            write_debug = None
            if self._debug_delta:
                write_debug = transaction._report
            deltacomputer = deltautil.deltacomputer(
                self, write_debug=write_debug
            )

        revinfo = revlogutils.revisioninfo(
            node,
            p1,
            p2,
            btext,
            textlen,
            cachedelta,
            flags,
        )

        deltainfo = deltacomputer.finddeltainfo(revinfo, fh)

        compression_mode = COMP_MODE_INLINE
        if self._docket is not None:
            default_comp = self._docket.default_compression_header
            r = deltautil.delta_compression(default_comp, deltainfo)
            compression_mode, deltainfo = r

        sidedata_compression_mode = COMP_MODE_INLINE
        if sidedata and self.hassidedata:
            sidedata_compression_mode = COMP_MODE_PLAIN
            serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
            sidedata_offset = self._docket.sidedata_end
            h, comp_sidedata = self.compress(serialized_sidedata)
            if (
                h != b'u'
                and comp_sidedata[0:1] != b'\0'
                and len(comp_sidedata) < len(serialized_sidedata)
            ):
                assert not h
                if (
                    comp_sidedata[0:1]
                    == self._docket.default_compression_header
                ):
                    sidedata_compression_mode = COMP_MODE_DEFAULT
                    serialized_sidedata = comp_sidedata
                else:
                    sidedata_compression_mode = COMP_MODE_INLINE
                    serialized_sidedata = comp_sidedata
        else:
            serialized_sidedata = b""
            # Don't store the offset if the sidedata is empty; that way we
            # can easily detect empty sidedata and it will be no different
            # from the ones we manually add.
            sidedata_offset = 0

        rank = RANK_UNKNOWN
        if self._compute_rank:
            if (p1r, p2r) == (nullrev, nullrev):
                rank = 1
            elif p1r != nullrev and p2r == nullrev:
                rank = 1 + self.fast_rank(p1r)
            elif p1r == nullrev and p2r != nullrev:
                rank = 1 + self.fast_rank(p2r)
            else:  # merge node
                if rustdagop is not None and self.index.rust_ext_compat:
                    rank = rustdagop.rank(self.index, p1r, p2r)
                else:
                    pmin, pmax = sorted((p1r, p2r))
                    rank = 1 + self.fast_rank(pmax)
                    rank += sum(1 for _ in self.findmissingrevs([pmax], [pmin]))

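        # When computed, the rank is the size of the revision's ancestor
        # set, the revision itself included: 1 for a root, one more than the
        # parent's rank on a linear chain, and for a merge the rank of one
        # parent plus however many extra ancestors the other parent
        # contributes (counted via findmissingrevs above).
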
        e = revlogutils.entry(
            flags=flags,
            data_offset=offset,
            data_compressed_length=deltainfo.deltalen,
            data_uncompressed_length=textlen,
            data_compression_mode=compression_mode,
            data_delta_base=deltainfo.base,
            link_rev=link,
            parent_rev_1=p1r,
            parent_rev_2=p2r,
            node_id=node,
            sidedata_offset=sidedata_offset,
            sidedata_compressed_length=len(serialized_sidedata),
            sidedata_compression_mode=sidedata_compression_mode,
            rank=rank,
        )

        self.index.append(e)
        entry = self.index.entry_binary(curr)
        if curr == 0 and self._docket is None:
            header = self._format_flags | self._format_version
            header = self.index.pack_header(header)
            entry = header + entry
        self._writeentry(
            transaction,
            entry,
            deltainfo.data,
            link,
            offset,
            serialized_sidedata,
            sidedata_offset,
        )

        rawtext = btext[0]

        if alwayscache and rawtext is None:
            rawtext = deltacomputer.buildtext(revinfo, fh)

        if type(rawtext) == bytes:  # only accept immutable objects
            self._revisioncache = (node, curr, rawtext)
        self._chainbasecache[curr] = deltainfo.chainbase
        return curr

    def _get_data_offset(self, prev):
        """Returns the current offset in the (in-transaction) data file.

        Versions < 2 of the revlog can get this in O(1), while revlog v2
        needs a docket file to store that information: since sidedata can be
        rewritten to the end of the data file within a transaction, you can
        have cases where, for example, rev `n` does not have sidedata while
        rev `n - 1` does, leading to `n - 1`'s sidedata being written after
        `n`'s data.

        TODO cache this in a docket file before getting out of experimental."""
        if self._docket is None:
            return self.end(prev)
        else:
            return self._docket.data_end

    def _writeentry(
        self, transaction, entry, data, link, offset, sidedata, sidedata_offset
    ):
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)
        ifh, dfh, sdfh = self._writinghandles
        if self._docket is None:
            ifh.seek(0, os.SEEK_END)
        else:
            ifh.seek(self._docket.index_end, os.SEEK_SET)
        if dfh:
            if self._docket is None:
                dfh.seek(0, os.SEEK_END)
            else:
                dfh.seek(self._docket.data_end, os.SEEK_SET)
        if sdfh:
            sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self._datafile, offset)
            if self._sidedatafile:
                transaction.add(self._sidedatafile, sidedata_offset)
            transaction.add(self._indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            if sidedata:
                sdfh.write(sidedata)
            ifh.write(entry)
        else:
            offset += curr * self.index.entry_size
            transaction.add(self._indexfile, offset)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            assert not sidedata
            self._enforceinlinesize(transaction)
        if self._docket is not None:
            # revlog-v2 always has 3 writing handles, help Pytype
            wh1 = self._writinghandles[0]
            wh2 = self._writinghandles[1]
            wh3 = self._writinghandles[2]
            assert wh1 is not None
            assert wh2 is not None
            assert wh3 is not None
            self._docket.index_end = wh1.tell()
            self._docket.data_end = wh2.tell()
            self._docket.sidedata_end = wh3.tell()

        nodemaputil.setup_persistent_nodemap(transaction, self)

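    # Note the two on-disk layouts handled above: a non-inline revlog writes
    # the entry to the index file and the delta to the data file, while an
    # inline revlog interleaves both in the index file, which is why
    # `_enforceinlinesize` runs right after the inline write.
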
    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        alwayscache=False,
        addrevisioncb=None,
        duplicaterevisioncb=None,
        debug_info=None,
    ):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log; the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """

        if self._adding_group:
            raise error.ProgrammingError(b'cannot nest addgroup() calls')

        self._adding_group = True
        empty = True
        try:
            with self._writing(transaction):
                write_debug = None
                if self._debug_delta:
                    write_debug = transaction._report
                deltacomputer = deltautil.deltacomputer(
                    self,
                    write_debug=write_debug,
                    debug_info=debug_info,
                )
                # loop through our set of deltas
                for data in deltas:
                    (
                        node,
                        p1,
                        p2,
                        linknode,
                        deltabase,
                        delta,
                        flags,
                        sidedata,
                    ) = data
                    link = linkmapper(linknode)
                    flags = flags or REVIDX_DEFAULT_FLAGS

                    rev = self.index.get_rev(node)
                    if rev is not None:
                        # this can happen if two branches make the same change
                        self._nodeduplicatecallback(transaction, rev)
                        if duplicaterevisioncb:
                            duplicaterevisioncb(self, rev)
                        empty = False
                        continue

                    for p in (p1, p2):
                        if not self.index.has_node(p):
                            raise error.LookupError(
                                p, self.radix, _(b'unknown parent')
                            )

                    if not self.index.has_node(deltabase):
                        raise error.LookupError(
                            deltabase, self.display_id, _(b'unknown delta base')
                        )

                    baserev = self.rev(deltabase)

                    if baserev != nullrev and self.iscensored(baserev):
                        # if base is censored, delta must be full replacement
                        # in a single patch operation
                        hlen = struct.calcsize(b">lll")
                        oldlen = self.rawsize(baserev)
                        newlen = len(delta) - hlen
                        if delta[:hlen] != mdiff.replacediffheader(
                            oldlen, newlen
                        ):
                            raise error.CensoredBaseError(
                                self.display_id, self.node(baserev)
                            )

                    if not flags and self._peek_iscensored(baserev, delta):
                        flags |= REVIDX_ISCENSORED

                    # We assume consumers of addrevisioncb will want to
                    # retrieve the added revision, which will require a call
                    # to revision(). revision() will fast path if there is a
                    # cache hit. So, we tell _addrevision() to always cache in
                    # this case. We're only using addgroup() in the context of
                    # changegroup generation so the revision data can always
                    # be handled as raw by the flagprocessor.
                    rev = self._addrevision(
                        node,
                        None,
                        transaction,
                        link,
                        p1,
                        p2,
                        flags,
                        (baserev, delta),
                        alwayscache=alwayscache,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

                    if addrevisioncb:
                        addrevisioncb(self, rev)
                    empty = False
        finally:
            self._adding_group = False
        return not empty

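    # Each element of ``deltas`` above is the 8-tuple unpacked in the loop:
    # (node, p1, p2, linknode, deltabase, delta, flags, sidedata), i.e. one
    # revision in the form a changegroup carries it.
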
    def iscensored(self, rev):
        """Check if a file revision is censored."""
        if not self._censorable:
            return False

        return self.flags(rev) & REVIDX_ISCENSORED

    def _peek_iscensored(self, baserev, delta):
        """Quickly check if a delta produces a censored revision."""
        if not self._censorable:
            return False

        return storageutil.deltaiscensored(delta, baserev, self.rawsize)

    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        return storageutil.resolvestripinfo(
            minlink,
            len(self) - 1,
            self.headrevs(),
            self.linkrev,
            self.parentrevs,
        )

    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        data_end = self.start(rev)
        if not self._inline:
            transaction.add(self._datafile, data_end)
            end = rev * self.index.entry_size
        else:
            end = data_end + (rev * self.index.entry_size)

        if self._sidedatafile:
            sidedata_end = self.sidedata_cut_off(rev)
            transaction.add(self._sidedatafile, sidedata_end)

        transaction.add(self._indexfile, end)
        if self._docket is not None:
            # XXX we could leverage the docket while stripping. However it is
            # not powerful enough at the time of this comment.
            self._docket.index_end = end
            self._docket.data_end = data_end
            self._docket.sidedata_end = sidedata_end
            self._docket.write(transaction, stripping=True)

        # then reset internal state in memory to forget those revisions
        self._revisioncache = None
        self._chaininfocache = util.lrucachedict(500)
        self._segmentfile.clear_cache()
        self._segmentfile_sidedata.clear_cache()

        del self.index[rev:-1]

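    # A stripping sketch, assuming `tr` is an open transaction: find where
    # truncation has to happen for changelog revision `minlink`, then cut:
    #
    #     rev, broken = rlog.getstrippoint(minlink)
    #     rlog.strip(minlink, tr)
    #
    # `broken` lists the revisions whose linkrevs the strip invalidates;
    # callers are expected to save those and re-add them afterwards.
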
    def checksize(self):
        """Check size of index and data files

        return a (dd, di) tuple.
        - dd: extra bytes for the "data" file
        - di: extra bytes for the "index" file

        A healthy revlog will return (0, 0).
        """
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, io.SEEK_END)
                actual = f.tell()
            dd = actual - expected
        except FileNotFoundError:
            dd = 0

        try:
            f = self.opener(self._indexfile)
            f.seek(0, io.SEEK_END)
            actual = f.tell()
            f.close()
            s = self.index.entry_size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except FileNotFoundError:
            di = 0

        return (dd, di)

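    # In other words, `rlog.checksize() == (0, 0)` for a healthy revlog;
    # positive values report trailing bytes that the index does not account
    # for (as could be left behind by an interrupted write, for example).
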
    def files(self):
        res = [self._indexfile]
        if self._docket_file is None:
            if not self._inline:
                res.append(self._datafile)
        else:
            res.append(self._docket_file)
            res.extend(self._docket.old_index_filepaths(include_empty=False))
            if self._docket.data_end:
                res.append(self._datafile)
            res.extend(self._docket.old_data_filepaths(include_empty=False))
            if self._docket.sidedata_end:
                res.append(self._sidedatafile)
            res.extend(self._docket.old_sidedata_filepaths(include_empty=False))
        return res

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
        sidedata_helpers=None,
        debug_info=None,
    ):
        if nodesorder not in (b'nodes', b'storage', b'linear', None):
            raise error.ProgrammingError(
                b'unhandled value for nodesorder: %s' % nodesorder
            )

        if nodesorder is None and not self._generaldelta:
            nodesorder = b'storage'

        if (
            not self._storedeltachains
            and deltamode != repository.CG_DELTAMODE_PREV
        ):
            deltamode = repository.CG_DELTAMODE_FULL

        return storageutil.emitrevisions(
            self,
            nodes,
            nodesorder,
            revlogrevisiondelta,
            deltaparentfn=self.deltaparent,
            candeltafn=self.candelta,
            rawsizefn=self.rawsize,
            revdifffn=self.revdiff,
            flagsfn=self.flags,
            deltamode=deltamode,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            sidedata_helpers=sidedata_helpers,
            debug_info=debug_info,
        )

    DELTAREUSEALWAYS = b'always'
    DELTAREUSESAMEREVS = b'samerevs'
    DELTAREUSENEVER = b'never'

    DELTAREUSEFULLADD = b'fulladd'

    DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}

2939 def clone(
2945 def clone(
2940 self,
2946 self,
2941 tr,
2947 tr,
2942 destrevlog,
2948 destrevlog,
2943 addrevisioncb=None,
2949 addrevisioncb=None,
2944 deltareuse=DELTAREUSESAMEREVS,
2950 deltareuse=DELTAREUSESAMEREVS,
2945 forcedeltabothparents=None,
2951 forcedeltabothparents=None,
2946 sidedata_helpers=None,
2952 sidedata_helpers=None,
2947 ):
2953 ):
2948 """Copy this revlog to another, possibly with format changes.
2954 """Copy this revlog to another, possibly with format changes.
2949
2955
2950 The destination revlog will contain the same revisions and nodes.
2956 The destination revlog will contain the same revisions and nodes.
2951 However, it may not be bit-for-bit identical due to e.g. delta encoding
2957 However, it may not be bit-for-bit identical due to e.g. delta encoding
2952 differences.
2958 differences.
2953
2959
2954 The ``deltareuse`` argument control how deltas from the existing revlog
2960 The ``deltareuse`` argument control how deltas from the existing revlog
2955 are preserved in the destination revlog. The argument can have the
2961 are preserved in the destination revlog. The argument can have the
2956 following values:
2962 following values:
2957
2963
2958 DELTAREUSEALWAYS
2964 DELTAREUSEALWAYS
2959 Deltas will always be reused (if possible), even if the destination
2965 Deltas will always be reused (if possible), even if the destination
2960 revlog would not select the same revisions for the delta. This is the
2966 revlog would not select the same revisions for the delta. This is the
2961 fastest mode of operation.
2967 fastest mode of operation.
2962 DELTAREUSESAMEREVS
2968 DELTAREUSESAMEREVS
2963 Deltas will be reused if the destination revlog would pick the same
2969 Deltas will be reused if the destination revlog would pick the same
2964 revisions for the delta. This mode strikes a balance between speed
2970 revisions for the delta. This mode strikes a balance between speed
2965 and optimization.
2971 and optimization.
2966 DELTAREUSENEVER
2972 DELTAREUSENEVER
2967 Deltas will never be reused. This is the slowest mode of execution.
2973 Deltas will never be reused. This is the slowest mode of execution.
2968 This mode can be used to recompute deltas (e.g. if the diff/delta
2974 This mode can be used to recompute deltas (e.g. if the diff/delta
2969 algorithm changes).
2975 algorithm changes).
2970 DELTAREUSEFULLADD
2976 DELTAREUSEFULLADD
2971 Revision will be re-added as if their were new content. This is
2977 Revision will be re-added as if their were new content. This is
2972 slower than DELTAREUSEALWAYS but allow more mechanism to kicks in.
2978 slower than DELTAREUSEALWAYS but allow more mechanism to kicks in.
2973 eg: large file detection and handling.
2979 eg: large file detection and handling.
2974
2980
2975 Delta computation can be slow, so the choice of delta reuse policy can
2981 Delta computation can be slow, so the choice of delta reuse policy can
2976 significantly affect run time.
2982 significantly affect run time.
2977
2983
2978 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2984 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2979 two extremes. Deltas will be reused if they are appropriate. But if the
2985 two extremes. Deltas will be reused if they are appropriate. But if the
2980 delta could choose a better revision, it will do so. This means if you
2986 delta could choose a better revision, it will do so. This means if you
2981 are converting a non-generaldelta revlog to a generaldelta revlog,
2987 are converting a non-generaldelta revlog to a generaldelta revlog,
2982 deltas will be recomputed if the delta's parent isn't a parent of the
2988 deltas will be recomputed if the delta's parent isn't a parent of the
2983 revision.
2989 revision.
2984
2990
2985 In addition to the delta policy, the ``forcedeltabothparents``
2991 In addition to the delta policy, the ``forcedeltabothparents``
2986 argument controls whether to force compute deltas against both parents
2992 argument controls whether to force compute deltas against both parents
2987 for merges. By default, the current default is used.
2993 for merges. By default, the current default is used.
2988
2994
2989 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
2995 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
2990 `sidedata_helpers`.
2996 `sidedata_helpers`.
2991 """
2997 """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(
                _(b'value for deltareuse invalid: %s') % deltareuse
            )

        if len(destrevlog):
            raise ValueError(_(b'destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_(b'source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_(b'destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase control whether to reuse a cached delta,
        # if possible.
        oldlazydelta = destrevlog._lazydelta
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            self._clone(
                tr,
                destrevlog,
                addrevisioncb,
                deltareuse,
                forcedeltabothparents,
                sidedata_helpers,
            )

        finally:
            destrevlog._lazydelta = oldlazydelta
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd

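# A minimal usage sketch, assuming `src` is an existing revlog, `dst` an
# empty destination revlog, and `tr` an open transaction (all hypothetical
# names; only the DELTAREUSE* constants come from this class): copy the
# revlog while recomputing every delta from scratch.
src.clone(tr, dst, deltareuse=src.DELTAREUSENEVER)
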
    def _clone(
        self,
        tr,
        destrevlog,
        addrevisioncb,
        deltareuse,
        forcedeltabothparents,
        sidedata_helpers,
    ):
        """perform the core duty of `revlog.clone` after parameter processing"""
        write_debug = None
        if self._debug_delta:
            write_debug = tr._report
        deltacomputer = deltautil.deltacomputer(
            destrevlog,
            write_debug=write_debug,
        )
        index = self.index
        for rev in self:
            entry = index[rev]

            # Some classes override linkrev to take filtered revs into
            # account. Use raw entry from index.
            flags = entry[0] & 0xFFFF
            linkrev = entry[4]
            p1 = index[entry[5]][7]
            p2 = index[entry[6]][7]
            node = entry[7]

            # (Possibly) reuse the delta from the revlog if allowed and
            # the revlog chunk is a delta.
            cachedelta = None
            rawtext = None
            if deltareuse == self.DELTAREUSEFULLADD:
                text = self._revisiondata(rev)
                sidedata = self.sidedata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                destrevlog.addrevision(
                    text,
                    tr,
                    linkrev,
                    p1,
                    p2,
                    cachedelta=cachedelta,
                    node=node,
                    flags=flags,
                    deltacomputer=deltacomputer,
                    sidedata=sidedata,
                )
            else:
                if destrevlog._lazydelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                sidedata = None
                if not cachedelta:
                    rawtext = self._revisiondata(rev)
                    sidedata = self.sidedata(rev)
                if sidedata is None:
                    sidedata = self.sidedata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                with destrevlog._writing(tr):
                    destrevlog._addrevision(
                        node,
                        rawtext,
                        tr,
                        linkrev,
                        p1,
                        p2,
                        flags,
                        cachedelta,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

            if addrevisioncb:
                addrevisioncb(self, rev, node)

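# A self-contained sketch of the flag merge used above (and again in
# rewrite_sidedata below): in "flags | new_flags[0] & ~new_flags[1]",
# '&' binds tighter than '|', so the "remove" mask only filters the bits
# being added; bits already set in `flags` are left untouched.
flags = 0b0001
to_add, to_remove = 0b0110, 0b0100
merged = flags | to_add & ~to_remove
assert merged == 0b0011  # 0b0100 is filtered from the additions only
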
    def censorrevision(self, tr, censornode, tombstone=b''):
        if self._format_version == REVLOGV0:
            raise error.RevlogError(
                _(b'cannot censor with version %d revlogs')
                % self._format_version
            )
        elif self._format_version == REVLOGV1:
            rewrite.v1_censor(self, tr, censornode, tombstone)
        else:
            rewrite.v2_censor(self, tr, censornode, tombstone)

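# Sketch, assuming `rl` is a non-v0 revlog, `tr` an open transaction, and
# `badnode` the node to censor (hypothetical names): the revision's
# content is replaced by the tombstone while the graph is preserved.
rl.censorrevision(tr, badnode, tombstone=b'removed by administrator')
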
    def verifyintegrity(self, state):
        """Verifies the integrity of the revlog.

        Yields ``revlogproblem`` instances describing problems that are
        found.
        """
        dd, di = self.checksize()
        if dd:
            yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
        if di:
            yield revlogproblem(error=_(b'index contains %d extra bytes') % di)

        version = self._format_version

        # The verifier tells us what version revlog we should be.
        if version != state[b'expectedversion']:
            yield revlogproblem(
                warning=_(b"warning: '%s' uses revlog format %d; expected %d")
                % (self.display_id, version, state[b'expectedversion'])
            )

        state[b'skipread'] = set()
        state[b'safe_renamed'] = set()

        for rev in self:
            node = self.node(rev)

            # Verify contents. 4 cases to care about:
            #
            #   common: the most common case
            #   rename: with a rename
            #   meta:   file content starts with b'\1\n', the metadata
            #           header defined in filelog.py, but without a rename
            #   ext:    content stored externally
            #
            # More formally, their differences are shown below:
            #
            #                       | common | rename | meta  | ext
            #  ------------------------------------------------------
            #  flags()              | 0      | 0      | 0     | not 0
            #  renamed()            | False  | True   | False | ?
            #  rawtext[0:2]=='\1\n' | False  | True   | True  | ?
            #
            # "rawtext" means the raw text stored in revlog data, which
            # could be retrieved by "rawdata(rev)". "text" mentioned below
            # is "revision(rev)".
            #
            # There are 3 different lengths stored physically:
            #  1. L1: rawsize, stored in revlog index
            #  2. L2: len(rawtext), stored in revlog data
            #  3. L3: len(text), stored in revlog data if flags==0, or
            #     possibly somewhere else if flags!=0
            #
            # L1 should be equal to L2. L3 could be different from them.
            # "text" may or may not affect commit hash depending on flag
            # processors (see flagutil.addflagprocessor).
            #
            #              | common | rename | meta  | ext
            # -------------------------------------------------
            # rawsize()    | L1     | L1     | L1    | L1
            # size()       | L1     | L2-LM  | L1(*) | L1 (?)
            # len(rawtext) | L2     | L2     | L2    | L2
            # len(text)    | L2     | L2     | L2    | L3
            # len(read())  | L2     | L2-LM  | L2-LM | L3 (?)
            #
            # LM:  length of metadata, depending on rawtext
            # (*): not ideal, see comment in filelog.size
            # (?): could be "- len(meta)" if the resolved content has
            #      rename metadata
            #
            # Checks needed to be done:
            #  1. length check: L1 == L2, in all cases.
            #  2. hash check: depending on flag processor, we may need to
            #     use either "text" (external), or "rawtext" (in revlog).

            try:
                skipflags = state.get(b'skipflags', 0)
                if skipflags:
                    skipflags &= self.flags(rev)

                _verify_revision(self, skipflags, state, node)

                l1 = self.rawsize(rev)
                l2 = len(self.rawdata(node))

                if l1 != l2:
                    yield revlogproblem(
                        error=_(b'unpacked size is %d, %d expected')
                        % (l2, l1),
                        node=node,
                    )

            except error.CensoredNodeError:
                if state[b'erroroncensored']:
                    yield revlogproblem(
                        error=_(b'censored file data'), node=node
                    )
                state[b'skipread'].add(node)
            except Exception as e:
                yield revlogproblem(
                    error=_(b'unpacking %s: %s')
                    % (short(node), stringutil.forcebytestr(e)),
                    node=node,
                )
                state[b'skipread'].add(node)

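# Sketch of a verification driver, assuming `rl` is an open revlog; the
# state keys mirror the ones verifyintegrity reads, and the values here
# are illustrative only.
state = {
    b'expectedversion': 1,
    b'erroroncensored': False,
    b'skipflags': 0,
}
for problem in rl.verifyintegrity(state):
    # each yielded revlogproblem carries either an error or a warning
    print(problem.error or problem.warning)
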
    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        d = {}

        if exclusivefiles:
            d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
            if not self._inline:
                d[b'exclusivefiles'].append((self.opener, self._datafile))

        if sharedfiles:
            d[b'sharedfiles'] = []

        if revisionscount:
            d[b'revisionscount'] = len(self)

        if trackedsize:
            d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))

        if storedsize:
            d[b'storedsize'] = sum(
                self.opener.stat(path).st_size for path in self.files()
            )

        return d

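# Sketch, assuming `rl` is an open revlog: request only the cheap
# counters; keys that are not asked for are simply absent from the result.
info = rl.storageinfo(revisionscount=True, trackedsize=True)
print(info[b'revisionscount'], info[b'trackedsize'])
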
    def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
        if not self.hassidedata:
            return
        # revlog formats with sidedata support do not support inline data
        assert not self._inline
        if not helpers[1] and not helpers[2]:
            # Nothing to generate or remove
            return

        new_entries = []
        # append the new sidedata
        with self._writing(transaction):
            ifh, dfh, sdfh = self._writinghandles
            dfh.seek(self._docket.sidedata_end, os.SEEK_SET)

            current_offset = sdfh.tell()
            for rev in range(startrev, endrev + 1):
                entry = self.index[rev]
                new_sidedata, flags = sidedatautil.run_sidedata_helpers(
                    store=self,
                    sidedata_helpers=helpers,
                    sidedata={},
                    rev=rev,
                )

                serialized_sidedata = sidedatautil.serialize_sidedata(
                    new_sidedata
                )

                sidedata_compression_mode = COMP_MODE_INLINE
                if serialized_sidedata and self.hassidedata:
                    sidedata_compression_mode = COMP_MODE_PLAIN
                    h, comp_sidedata = self.compress(serialized_sidedata)
                    if (
                        h != b'u'
                        and comp_sidedata[0] != b'\0'
                        and len(comp_sidedata) < len(serialized_sidedata)
                    ):
                        assert not h
                        if (
                            comp_sidedata[0]
                            == self._docket.default_compression_header
                        ):
                            sidedata_compression_mode = COMP_MODE_DEFAULT
                            serialized_sidedata = comp_sidedata
                        else:
                            sidedata_compression_mode = COMP_MODE_INLINE
                            serialized_sidedata = comp_sidedata
                if entry[8] != 0 or entry[9] != 0:
                    # rewriting entries that already have sidedata is not
                    # supported yet, because it introduces garbage data in
                    # the revlog.
                    msg = b"rewriting existing sidedata is not supported yet"
                    raise error.Abort(msg)

                # Apply (potential) flags to add and to remove after running
                # the sidedata helpers
                new_offset_flags = entry[0] | flags[0] & ~flags[1]
                entry_update = (
                    current_offset,
                    len(serialized_sidedata),
                    new_offset_flags,
                    sidedata_compression_mode,
                )

                # the sidedata computation might have moved the file cursors
                # around
                sdfh.seek(current_offset, os.SEEK_SET)
                sdfh.write(serialized_sidedata)
                new_entries.append(entry_update)
                current_offset += len(serialized_sidedata)
            self._docket.sidedata_end = sdfh.tell()

            # rewrite the new index entries
            ifh.seek(startrev * self.index.entry_size)
            for i, e in enumerate(new_entries):
                rev = startrev + i
                self.index.replace_sidedata_info(rev, *e)
                packed = self.index.entry_binary(rev)
                if rev == 0 and self._docket is None:
                    header = self._format_flags | self._format_version
                    header = self.index.pack_header(header)
                    packed = header + packed
                ifh.write(packed)
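
# A self-contained sketch of the sidedata compression-mode choice above,
# using zlib as a stand-in compressor; the real code consults the revlog's
# configured compression engine and its docket header, and the constant
# values here are illustrative.
import zlib

COMP_MODE_PLAIN, COMP_MODE_DEFAULT, COMP_MODE_INLINE = 0, 1, 2
DEFAULT_HEADER = b'x'  # zlib streams start with 0x78, i.e. b'x'

def pick_sidedata_mode(raw):
    comp = zlib.compress(raw)
    if len(comp) >= len(raw):
        return COMP_MODE_PLAIN, raw  # compression did not pay off
    if comp[:1] == DEFAULT_HEADER:
        return COMP_MODE_DEFAULT, comp  # header is implied, not stored
    return COMP_MODE_INLINE, comp  # header must be stored inline

mode, payload = pick_sidedata_mode(b'sidedata payload ' * 16)
assert mode == COMP_MODE_DEFAULT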