deltas: add a `debug.revlog.debug-delta` config option to enable output...
marmoute
r50122:2bcf5e14 default
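The changeset registers a new boolean config item, `debug.revlog.debug-delta`, defaulting to `False`. As a hedged sketch (not part of this changeset), code that holds a `ui` object would read the option through `ui.configbool`, the standard accessor for boolean config items:

    # illustrative only: query the new option via the regular config API
    if ui.configbool(b'debug', b'revlog.debug-delta'):
        ui.write(b'delta debug output enabled\n')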
@@ -1,2804 +1,2809 @@
# configitems.py - centralized declaration of configuration option
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import functools
import re

from . import (
    encoding,
    error,
)


def loadconfigtable(ui, extname, configtable):
    """update config items known to the ui with the extension ones"""
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            msg = b"extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config=b'warn-config')

        knownitems.update(items)


class configitem:
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, match name using a regular expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        self._re = None
        if generic:
            self._re = re.compile(self.name)


class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        baseitem = super(itemregister, self).get(key)
        if baseitem is not None and not baseitem.generic:
            return baseitem

        # search for a matching generic item
        generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
        for item in generics:
            # we use 'match' instead of 'search' to make the matching simpler
            # for people unfamiliar with regular expressions. Having the match
            # rooted at the start of the string produces less surprising
            # results for users writing simple regexes for sub-attributes.
            #
            # For example, using "color\..*" with match produces an
            # unsurprising result, while using search could suddenly match
            # apparently unrelated configuration that happens to contain
            # "color." anywhere. This is a tradeoff where we favor requiring
            # ".*" on some matches to avoid the need to prefix most patterns
            # with "^". The "^" seems more error prone.
            if item._re.match(key):
                return item

        return None

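# Illustration (a sketch, not part of the original file): because get()
# relies on re.match(), a generic item registered under the pattern
# b'suboption\..*' resolves lookups such as get(b'suboption.foo'), while
# get(b'other.suboption.foo') does NOT match, since the pattern is anchored
# at the start of the key.
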
coreitems = {}


def _register(configtable, *args, **kwargs):
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = b"duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item


# special value for the case where the default is derived from other values
dynamicdefault = object()

# Registering actual config items


def getitemregister(configtable):
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f


coreconfigitem = getitemregister(coreitems)

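# Illustration (a sketch, not part of the original file): extensions build
# their own table with the same machinery, via mercurial.registrar:
#
#     from mercurial import registrar
#
#     configtable = {}
#     configitem = registrar.configitem(configtable)
#     configitem(b'myext', b'some-option', default=False)  # hypothetical names
#
# loadconfigtable() above later merges such tables into ui._knownconfig.
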
def _registerdiffopts(section, configprefix=b''):
    coreconfigitem(
        section,
        configprefix + b'nodates',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'showfunc',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'unified',
        default=None,
    )
    coreconfigitem(
        section,
        configprefix + b'git',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorews',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorewsamount',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignoreblanklines',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorewseol',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'nobinary',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'noprefix',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'word-diff',
        default=False,
    )


coreconfigitem(
    b'alias',
    b'.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'auth',
    b'cookiefile',
    default=None,
)
_registerdiffopts(section=b'annotate')
# bookmarks.pushing: internal hack for discovery
coreconfigitem(
    b'bookmarks',
    b'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem(
    b'bundle',
    b'mainreporoot',
    default=b'',
)
coreconfigitem(
    b'censor',
    b'policy',
    default=b'abort',
    experimental=True,
)
coreconfigitem(
    b'chgserver',
    b'idletimeout',
    default=3600,
)
coreconfigitem(
    b'chgserver',
    b'skiphash',
    default=False,
)
coreconfigitem(
    b'cmdserver',
    b'log',
    default=None,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-files',
    default=7,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-size',
    default=b'1 MB',
)
coreconfigitem(
    b'cmdserver',
    b'max-repo-cache',
    default=0,
    experimental=True,
)
coreconfigitem(
    b'cmdserver',
    b'message-encodings',
    default=list,
)
coreconfigitem(
    b'cmdserver',
    b'track-log',
    default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
)
coreconfigitem(
    b'cmdserver',
    b'shutdown-on-interrupt',
    default=True,
)
coreconfigitem(
    b'color',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'color',
    b'mode',
    default=b'auto',
)
coreconfigitem(
    b'color',
    b'pagermode',
    default=dynamicdefault,
)
coreconfigitem(
    b'command-templates',
    b'graphnode',
    default=None,
    alias=[(b'ui', b'graphnodetemplate')],
)
coreconfigitem(
    b'command-templates',
    b'log',
    default=None,
    alias=[(b'ui', b'logtemplate')],
)
coreconfigitem(
    b'command-templates',
    b'mergemarker',
    default=(
        b'{node|short} '
        b'{ifeq(tags, "tip", "", '
        b'ifeq(tags, "", "", "{tags} "))}'
        b'{if(bookmarks, "{bookmarks} ")}'
        b'{ifeq(branch, "default", "", "{branch} ")}'
        b'- {author|user}: {desc|firstline}'
    ),
    alias=[(b'ui', b'mergemarkertemplate')],
)
coreconfigitem(
    b'command-templates',
    b'pre-merge-tool-output',
    default=None,
    alias=[(b'ui', b'pre-merge-tool-output-template')],
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary',
    default=None,
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary.*',
    default=dynamicdefault,
    generic=True,
)
_registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
coreconfigitem(
    b'commands',
    b'commit.post-status',
    default=False,
)
coreconfigitem(
    b'commands',
    b'grep.all-files',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'merge.require-rev',
    default=False,
)
coreconfigitem(
    b'commands',
    b'push.require-revs',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.confirm',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.explicit-re-merge',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.mark-check',
    default=b'none',
)
_registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
coreconfigitem(
    b'commands',
    b'show.aliasprefix',
    default=list,
)
coreconfigitem(
    b'commands',
    b'status.relative',
    default=False,
)
coreconfigitem(
    b'commands',
    b'status.skipstates',
    default=[],
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'status.terse',
    default=b'',
)
coreconfigitem(
    b'commands',
    b'status.verbose',
    default=False,
)
coreconfigitem(
    b'commands',
    b'update.check',
    default=None,
)
coreconfigitem(
    b'commands',
    b'update.requiredest',
    default=False,
)
coreconfigitem(
    b'committemplate',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'convert',
    b'bzr.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.cache',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.fuzz',
    default=60,
)
coreconfigitem(
    b'convert',
    b'cvsps.logencoding',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergefrom',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergeto',
    default=None,
)
coreconfigitem(
    b'convert',
    b'git.committeractions',
    default=lambda: [b'messagedifferent'],
)
coreconfigitem(
    b'convert',
    b'git.extrakeys',
    default=list,
)
coreconfigitem(
    b'convert',
    b'git.findcopiesharder',
    default=False,
)
coreconfigitem(
    b'convert',
    b'git.remoteprefix',
    default=b'remote',
)
coreconfigitem(
    b'convert',
    b'git.renamelimit',
    default=400,
)
coreconfigitem(
    b'convert',
    b'git.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'git.similarity',
    default=50,
)
coreconfigitem(
    b'convert',
    b'git.skipsubmodules',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.clonebranches',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.ignoreerrors',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.preserve-hash',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.revs',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.saverev',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.sourcename',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.startrev',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.tagsbranch',
    default=b'default',
)
coreconfigitem(
    b'convert',
    b'hg.usebranchnames',
    default=True,
)
coreconfigitem(
    b'convert',
    b'ignoreancestorcheck',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'convert',
    b'localtimezone',
    default=False,
)
coreconfigitem(
    b'convert',
    b'p4.encoding',
    default=dynamicdefault,
)
coreconfigitem(
    b'convert',
    b'p4.startrev',
    default=0,
)
coreconfigitem(
    b'convert',
    b'skiptags',
    default=False,
)
coreconfigitem(
    b'convert',
    b'svn.debugsvnlog',
    default=True,
)
coreconfigitem(
    b'convert',
    b'svn.trunk',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.tags',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.branches',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.startrev',
    default=0,
)
coreconfigitem(
    b'convert',
    b'svn.dangerous-set-commit-dates',
    default=False,
)
coreconfigitem(
    b'debug',
    b'dirstate.delaywrite',
    default=0,
)
coreconfigitem(
    b'debug',
    b'revlog.verifyposition.changelog',
    default=b'',
)
coreconfigitem(
    b'debug',
    b'revlog.debug-delta',
    default=False,
)
coreconfigitem(
    b'defaults',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'devel',
    b'all-warnings',
    default=False,
)
coreconfigitem(
    b'devel',
    b'bundle2.debug',
    default=False,
)
coreconfigitem(
    b'devel',
    b'bundle.delta',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'cache-vfs',
    default=None,
)
coreconfigitem(
    b'devel',
    b'check-locks',
    default=False,
)
coreconfigitem(
    b'devel',
    b'check-relroot',
    default=False,
)
# Track copy information for all files, not just "added" ones (very slow)
coreconfigitem(
    b'devel',
    b'copy-tracing.trace-all-files',
    default=False,
)
coreconfigitem(
    b'devel',
    b'default-date',
    default=None,
)
coreconfigitem(
    b'devel',
    b'deprec-warn',
    default=False,
)
coreconfigitem(
    b'devel',
    b'disableloaddefaultcerts',
    default=False,
)
coreconfigitem(
    b'devel',
    b'warn-empty-changegroup',
    default=False,
)
coreconfigitem(
    b'devel',
    b'legacy.exchange',
    default=list,
)
# When True, revlogs use a special reference version of the nodemap that is
# not performant but is "known" to behave properly.
coreconfigitem(
    b'devel',
    b'persistent-nodemap',
    default=False,
)
coreconfigitem(
    b'devel',
    b'servercafile',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'serverexactprotocol',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'serverrequirecert',
    default=False,
)
coreconfigitem(
    b'devel',
    b'strip-obsmarkers',
    default=True,
)
coreconfigitem(
    b'devel',
    b'warn-config',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-default',
    default=None,
)
coreconfigitem(
    b'devel',
    b'user.obsmarker',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-unknown',
    default=None,
)
coreconfigitem(
    b'devel',
    b'debug.copies',
    default=False,
)
coreconfigitem(
    b'devel',
    b'copy-tracing.multi-thread',
    default=True,
)
coreconfigitem(
    b'devel',
    b'debug.extensions',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.repo-filters',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.peer-request',
    default=False,
)
# If discovery.exchange-heads is False, the discovery will not start with
# remote head fetching and local head querying.
coreconfigitem(
    b'devel',
    b'discovery.exchange-heads',
    default=True,
)
# If discovery.grow-sample is False, the sample size used in set discovery
# will not be increased through the process
coreconfigitem(
    b'devel',
    b'discovery.grow-sample',
    default=True,
)
# When discovery.grow-sample.dynamic is True, the default, the sample size is
# adapted to the shape of the undecided set (it is set to the max of:
# <target-size>, len(roots(undecided)), len(heads(undecided)))
1242 b'extensions',
1247 b'extensions',
1243 b'[^:]*',
1248 b'[^:]*',
1244 default=None,
1249 default=None,
1245 generic=True,
1250 generic=True,
1246 )
1251 )
1247 coreconfigitem(
1252 coreconfigitem(
1248 b'extensions',
1253 b'extensions',
1249 b'[^:]*:required',
1254 b'[^:]*:required',
1250 default=False,
1255 default=False,
1251 generic=True,
1256 generic=True,
1252 )
1257 )
1253 coreconfigitem(
1258 coreconfigitem(
1254 b'extdata',
1259 b'extdata',
1255 b'.*',
1260 b'.*',
1256 default=None,
1261 default=None,
1257 generic=True,
1262 generic=True,
1258 )
1263 )
1259 coreconfigitem(
1264 coreconfigitem(
1260 b'format',
1265 b'format',
1261 b'bookmarks-in-store',
1266 b'bookmarks-in-store',
1262 default=False,
1267 default=False,
1263 )
1268 )
1264 coreconfigitem(
1269 coreconfigitem(
1265 b'format',
1270 b'format',
1266 b'chunkcachesize',
1271 b'chunkcachesize',
1267 default=None,
1272 default=None,
1268 experimental=True,
1273 experimental=True,
1269 )
1274 )
1270 coreconfigitem(
1275 coreconfigitem(
1271 # Enable this dirstate format *when creating a new repository*.
1276 # Enable this dirstate format *when creating a new repository*.
1272 # Which format to use for existing repos is controlled by .hg/requires
1277 # Which format to use for existing repos is controlled by .hg/requires
1273 b'format',
1278 b'format',
1274 b'use-dirstate-v2',
1279 b'use-dirstate-v2',
1275 default=False,
1280 default=False,
1276 experimental=True,
1281 experimental=True,
1277 alias=[(b'format', b'exp-rc-dirstate-v2')],
1282 alias=[(b'format', b'exp-rc-dirstate-v2')],
1278 )
1283 )
coreconfigitem(
    b'format',
    b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-dirstate-tracked-hint',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-dirstate-tracked-hint.version',
    default=1,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'dotencode',
    default=True,
)
coreconfigitem(
    b'format',
    b'generaldelta',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'manifestcachesize',
    default=None,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'maxchainlen',
    default=dynamicdefault,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'obsstore-version',
    default=None,
)
coreconfigitem(
    b'format',
    b'sparse-revlog',
    default=True,
)
coreconfigitem(
    b'format',
    b'revlog-compression',
    default=lambda: [b'zstd', b'zlib'],
    alias=[(b'experimental', b'format.compression')],
)
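# Illustrative note (not from the original file): the default above is a list
# of engines tried in order, so zstd is used when available and zlib is the
# fallback. A repository can pin a single engine in its hgrc:
#
#     [format]
#     revlog-compression = zstd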
# Experimental TODOs:
#
# * Same as for revlogv2 (but for the reduction of the number of files)
# * Actually computing the rank of changesets
# * Improvement to investigate
#   - storing .hgtags fnode
#   - storing branch related identifier

coreconfigitem(
    b'format',
    b'exp-use-changelog-v2',
    default=None,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'usefncache',
    default=True,
)
coreconfigitem(
    b'format',
    b'usegeneraldelta',
    default=True,
)
coreconfigitem(
    b'format',
    b'usestore',
    default=True,
)


def _persistent_nodemap_default():
    """compute `use-persistent-nodemap` default value

    The feature is disabled unless a fast implementation is available.
    """
    from . import policy

    return policy.importrust('revlog') is not None


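# Note (added for clarity, not in the original file): policy.importrust()
# returns the compiled Rust module or None, so on a pure-Python install the
# function above evaluates to False and the persistent nodemap stays off by
# default.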
coreconfigitem(
    b'format',
    b'use-persistent-nodemap',
    default=_persistent_nodemap_default,
)
coreconfigitem(
    b'format',
    b'exp-use-copies-side-data-changeset',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-share-safe',
    default=True,
)
coreconfigitem(
    b'format',
    b'use-share-safe.automatic-upgrade-of-mismatching-repositories',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'internal-phase',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'fsmonitor',
    b'warn_when_unused',
    default=True,
)
coreconfigitem(
    b'fsmonitor',
    b'warn_update_file_count',
    default=50000,
)
coreconfigitem(
    b'fsmonitor',
    b'warn_update_file_count_rust',
    default=400000,
)
coreconfigitem(
    b'help',
    br'hidden-command\..*',
    default=False,
    generic=True,
)
coreconfigitem(
    b'help',
    br'hidden-topic\..*',
    default=False,
    generic=True,
)
coreconfigitem(
    b'hooks',
    b'[^:]*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hooks',
    b'.*:run-with-plain',
    default=True,
    generic=True,
)
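# Illustrative sketch (hook name and script hypothetical, not from the
# original file): the generic pattern above matches any hook entry, and the
# :run-with-plain suboption controls whether the hook runs with HGPLAIN set:
#
#     [hooks]
#     pretxncommit.lint = ./check.sh
#     pretxncommit.lint:run-with-plain = no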
coreconfigitem(
    b'hgweb-paths',
    b'.*',
    default=list,
    generic=True,
)
coreconfigitem(
    b'hostfingerprints',
    b'.*',
    default=list,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'ciphers',
    default=None,
)
coreconfigitem(
    b'hostsecurity',
    b'minimumprotocol',
    default=dynamicdefault,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:minimumprotocol$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:ciphers$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:fingerprints$',
    default=list,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:verifycertsfile$',
    default=None,
    generic=True,
)
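# Illustrative sketch (host and digest are placeholders, not from the
# original file): the per-host generic patterns above are keyed on the host
# name, e.g.:
#
#     [hostsecurity]
#     hg.example.com:minimumprotocol = tls1.2
#     hg.example.com:fingerprints = sha256:<hex digest of the certificate>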

coreconfigitem(
    b'http_proxy',
    b'always',
    default=False,
)
coreconfigitem(
    b'http_proxy',
    b'host',
    default=None,
)
coreconfigitem(
    b'http_proxy',
    b'no',
    default=list,
)
coreconfigitem(
    b'http_proxy',
    b'passwd',
    default=None,
)
coreconfigitem(
    b'http_proxy',
    b'user',
    default=None,
)

coreconfigitem(
    b'http',
    b'timeout',
    default=None,
)

coreconfigitem(
    b'logtoprocess',
    b'commandexception',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'commandfinish',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'command',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'develwarn',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'uiblocked',
    default=None,
)
coreconfigitem(
    b'merge',
    b'checkunknown',
    default=b'abort',
)
coreconfigitem(
    b'merge',
    b'checkignored',
    default=b'abort',
)
coreconfigitem(
    b'experimental',
    b'merge.checkpathconflicts',
    default=False,
)
coreconfigitem(
    b'merge',
    b'followcopies',
    default=True,
)
coreconfigitem(
    b'merge',
    b'on-failure',
    default=b'continue',
)
coreconfigitem(
    b'merge',
    b'preferancestor',
    default=lambda: [b'*'],
    experimental=True,
)
coreconfigitem(
    b'merge',
    b'strict-capability-check',
    default=False,
)
coreconfigitem(
    b'merge',
    b'disable-partial-tools',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'partial-merge-tools',
    b'.*',
    default=None,
    generic=True,
    experimental=True,
)
coreconfigitem(
    b'partial-merge-tools',
    br'.*\.patterns',
    default=dynamicdefault,
    generic=True,
    priority=-1,
    experimental=True,
)
coreconfigitem(
    b'partial-merge-tools',
    br'.*\.executable$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
    experimental=True,
)
coreconfigitem(
    b'partial-merge-tools',
    br'.*\.order',
    default=0,
    generic=True,
    priority=-1,
    experimental=True,
)
coreconfigitem(
    b'partial-merge-tools',
    br'.*\.args',
    default=b"$local $base $other",
    generic=True,
    priority=-1,
    experimental=True,
)
coreconfigitem(
    b'partial-merge-tools',
    br'.*\.disable',
    default=False,
    generic=True,
    priority=-1,
    experimental=True,
)
coreconfigitem(
    b'merge-tools',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.args$',
    default=b"$local $base $other",
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.binary$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.check$',
    default=list,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.checkchanged$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.executable$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.fixeol$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.gui$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkers$',
    default=b'basic',
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkertemplate$',
    default=dynamicdefault,  # take from command-templates.mergemarker
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.priority$',
    default=0,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.premerge$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.symlink$',
    default=False,
    generic=True,
    priority=-1,
)
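# Illustrative sketch (tool name and path hypothetical, not from the original
# file): the merge-tools patterns above describe per-tool suboptions such as:
#
#     [merge-tools]
#     kdiff3.executable = /usr/bin/kdiff3
#     kdiff3.args = $base $local $other -o $output
#     kdiff3.priority = 1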
coreconfigitem(
    b'pager',
    b'attend-.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'pager',
    b'ignore',
    default=list,
)
coreconfigitem(
    b'pager',
    b'pager',
    default=dynamicdefault,
)
coreconfigitem(
    b'patch',
    b'eol',
    default=b'strict',
)
coreconfigitem(
    b'patch',
    b'fuzz',
    default=2,
)
coreconfigitem(
    b'paths',
    b'default',
    default=None,
)
coreconfigitem(
    b'paths',
    b'default-push',
    default=None,
)
coreconfigitem(
    b'paths',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'paths',
    b'.*:bookmarks.mode',
    default='default',
    generic=True,
)
coreconfigitem(
    b'paths',
    b'.*:multi-urls',
    default=False,
    generic=True,
)
coreconfigitem(
    b'paths',
    b'.*:pushrev',
    default=None,
    generic=True,
)
coreconfigitem(
    b'paths',
    b'.*:pushurl',
    default=None,
    generic=True,
)
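# Illustrative sketch (URLs are placeholders, not from the original file):
# the path suboptions above attach to a named path with a `:` suffix:
#
#     [paths]
#     default = https://hg.example.org/repo
#     default:pushurl = ssh://hg@example.org/repo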
coreconfigitem(
    b'phases',
    b'checksubrepos',
    default=b'follow',
)
coreconfigitem(
    b'phases',
    b'new-commit',
    default=b'draft',
)
coreconfigitem(
    b'phases',
    b'publish',
    default=True,
)
coreconfigitem(
    b'profiling',
    b'enabled',
    default=False,
)
coreconfigitem(
    b'profiling',
    b'format',
    default=b'text',
)
coreconfigitem(
    b'profiling',
    b'freq',
    default=1000,
)
coreconfigitem(
    b'profiling',
    b'limit',
    default=30,
)
coreconfigitem(
    b'profiling',
    b'nested',
    default=0,
)
coreconfigitem(
    b'profiling',
    b'output',
    default=None,
)
coreconfigitem(
    b'profiling',
    b'showmax',
    default=0.999,
)
coreconfigitem(
    b'profiling',
    b'showmin',
    default=dynamicdefault,
)
coreconfigitem(
    b'profiling',
    b'showtime',
    default=True,
)
coreconfigitem(
    b'profiling',
    b'sort',
    default=b'inlinetime',
)
coreconfigitem(
    b'profiling',
    b'statformat',
    default=b'hotpath',
)
coreconfigitem(
    b'profiling',
    b'time-track',
    default=dynamicdefault,
)
coreconfigitem(
    b'profiling',
    b'type',
    default=b'stat',
)
coreconfigitem(
    b'progress',
    b'assume-tty',
    default=False,
)
coreconfigitem(
    b'progress',
    b'changedelay',
    default=1,
)
coreconfigitem(
    b'progress',
    b'clear-complete',
    default=True,
)
coreconfigitem(
    b'progress',
    b'debug',
    default=False,
)
coreconfigitem(
    b'progress',
    b'delay',
    default=3,
)
coreconfigitem(
    b'progress',
    b'disable',
    default=False,
)
coreconfigitem(
    b'progress',
    b'estimateinterval',
    default=60.0,
)
coreconfigitem(
    b'progress',
    b'format',
    default=lambda: [b'topic', b'bar', b'number', b'estimate'],
)
coreconfigitem(
    b'progress',
    b'refresh',
    default=0.1,
)
coreconfigitem(
    b'progress',
    b'width',
    default=dynamicdefault,
)
coreconfigitem(
    b'pull',
    b'confirm',
    default=False,
)
coreconfigitem(
    b'push',
    b'pushvars.server',
    default=False,
)
coreconfigitem(
    b'rewrite',
    b'backup-bundle',
    default=True,
    alias=[(b'ui', b'history-editing-backup')],
)
coreconfigitem(
    b'rewrite',
    b'update-timestamp',
    default=False,
)
coreconfigitem(
    b'rewrite',
    b'empty-successor',
    default=b'skip',
    experimental=True,
)
# experimental as long as format.use-dirstate-v2 is.
coreconfigitem(
    b'storage',
    b'dirstate-v2.slow-path',
    default=b"abort",
    experimental=True,
)
coreconfigitem(
    b'storage',
    b'new-repo-backend',
    default=b'revlogv1',
    experimental=True,
)
coreconfigitem(
    b'storage',
    b'revlog.optimize-delta-parent-choice',
    default=True,
    alias=[(b'format', b'aggressivemergedeltas')],
)
coreconfigitem(
    b'storage',
    b'revlog.issue6528.fix-incoming',
    default=True,
)
# experimental as long as rust is experimental (or a C version is implemented)
coreconfigitem(
    b'storage',
    b'revlog.persistent-nodemap.mmap',
    default=True,
)
# experimental as long as format.use-persistent-nodemap is.
coreconfigitem(
    b'storage',
    b'revlog.persistent-nodemap.slow-path',
    default=b"abort",
)

coreconfigitem(
    b'storage',
    b'revlog.reuse-external-delta',
    default=True,
)
coreconfigitem(
    b'storage',
    b'revlog.reuse-external-delta-parent',
    default=None,
)
coreconfigitem(
    b'storage',
    b'revlog.zlib.level',
    default=None,
)
coreconfigitem(
    b'storage',
    b'revlog.zstd.level',
    default=None,
)
coreconfigitem(
    b'server',
    b'bookmarks-pushkey-compat',
    default=True,
)
coreconfigitem(
    b'server',
    b'bundle1',
    default=True,
)
coreconfigitem(
    b'server',
    b'bundle1gd',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1.pull',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1gd.pull',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1.push',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1gd.push',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle2.stream',
    default=True,
    alias=[(b'experimental', b'bundle2.stream')],
)
coreconfigitem(
    b'server',
    b'compressionengines',
    default=list,
)
coreconfigitem(
    b'server',
    b'concurrent-push-mode',
    default=b'check-related',
)
coreconfigitem(
    b'server',
    b'disablefullbundle',
    default=False,
)
coreconfigitem(
    b'server',
    b'maxhttpheaderlen',
    default=1024,
)
coreconfigitem(
    b'server',
    b'pullbundle',
    default=False,
)
coreconfigitem(
    b'server',
    b'preferuncompressed',
    default=False,
)
coreconfigitem(
    b'server',
    b'streamunbundle',
    default=False,
)
coreconfigitem(
    b'server',
    b'uncompressed',
    default=True,
)
coreconfigitem(
    b'server',
    b'uncompressedallowsecret',
    default=False,
)
coreconfigitem(
    b'server',
    b'view',
    default=b'served',
)
coreconfigitem(
    b'server',
    b'validate',
    default=False,
)
coreconfigitem(
    b'server',
    b'zliblevel',
    default=-1,
)
coreconfigitem(
    b'server',
    b'zstdlevel',
    default=3,
)
coreconfigitem(
    b'share',
    b'pool',
    default=None,
)
coreconfigitem(
    b'share',
    b'poolnaming',
    default=b'identity',
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-not-safe',
    default=b'abort',
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-safe',
    default=b'abort',
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-not-safe.warn',
    default=True,
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-safe.warn',
    default=True,
)
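# Illustrative note (values assumed from hg's share-safe documentation, not
# from this excerpt): the safe-mismatch options accept values such as
# `abort`, `allow`, `upgrade-abort`, or `upgrade-allow`, e.g.:
#
#     [share]
#     safe-mismatch.source-not-safe = upgrade-allow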
coreconfigitem(
    b'shelve',
    b'maxbackups',
    default=10,
)
coreconfigitem(
    b'smtp',
    b'host',
    default=None,
)
coreconfigitem(
    b'smtp',
    b'local_hostname',
    default=None,
)
coreconfigitem(
    b'smtp',
    b'password',
    default=None,
)
coreconfigitem(
    b'smtp',
    b'port',
    default=dynamicdefault,
)
coreconfigitem(
    b'smtp',
    b'tls',
    default=b'none',
)
coreconfigitem(
    b'smtp',
    b'username',
    default=None,
)
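# Illustrative sketch (host and account are placeholders, not from the
# original file): a typical smtp section, as used by extensions such as
# patchbomb, might look like:
#
#     [smtp]
#     host = smtp.example.com
#     tls = starttls
#     username = me@example.com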
coreconfigitem(
    b'sparse',
    b'missingwarning',
    default=True,
    experimental=True,
)
coreconfigitem(
    b'subrepos',
    b'allowed',
    default=dynamicdefault,  # to make backporting simpler
)
coreconfigitem(
    b'subrepos',
    b'hg:allowed',
    default=dynamicdefault,
)
coreconfigitem(
    b'subrepos',
    b'git:allowed',
    default=dynamicdefault,
)
coreconfigitem(
    b'subrepos',
    b'svn:allowed',
    default=dynamicdefault,
)
coreconfigitem(
    b'templates',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'templateconfig',
    b'.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'trusted',
    b'groups',
    default=list,
)
coreconfigitem(
    b'trusted',
    b'users',
    default=list,
)
coreconfigitem(
    b'ui',
    b'_usedassubrepo',
    default=False,
)
coreconfigitem(
    b'ui',
    b'allowemptycommit',
    default=False,
)
coreconfigitem(
    b'ui',
    b'archivemeta',
    default=True,
)
coreconfigitem(
    b'ui',
    b'askusername',
    default=False,
)
coreconfigitem(
    b'ui',
    b'available-memory',
    default=None,
)

coreconfigitem(
    b'ui',
    b'clonebundlefallback',
    default=False,
)
coreconfigitem(
    b'ui',
    b'clonebundleprefers',
    default=list,
)
coreconfigitem(
    b'ui',
    b'clonebundles',
    default=True,
)
coreconfigitem(
    b'ui',
    b'color',
    default=b'auto',
)
coreconfigitem(
    b'ui',
    b'commitsubrepos',
    default=False,
)
coreconfigitem(
    b'ui',
    b'debug',
    default=False,
)
coreconfigitem(
    b'ui',
    b'debugger',
    default=None,
)
coreconfigitem(
    b'ui',
    b'editor',
    default=dynamicdefault,
)
coreconfigitem(
    b'ui',
    b'detailed-exit-code',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'ui',
    b'fallbackencoding',
    default=None,
)
coreconfigitem(
    b'ui',
    b'forcecwd',
    default=None,
)
coreconfigitem(
    b'ui',
    b'forcemerge',
    default=None,
)
coreconfigitem(
    b'ui',
    b'formatdebug',
    default=False,
)
coreconfigitem(
    b'ui',
    b'formatjson',
    default=False,
)
coreconfigitem(
    b'ui',
    b'formatted',
    default=None,
)
coreconfigitem(
    b'ui',
    b'interactive',
    default=None,
)
coreconfigitem(
    b'ui',
    b'interface',
    default=None,
)
coreconfigitem(
    b'ui',
    b'interface.chunkselector',
    default=None,
)
coreconfigitem(
    b'ui',
    b'large-file-limit',
    default=10 * (2 ** 20),
)
coreconfigitem(
    b'ui',
    b'logblockedtimes',
    default=False,
)
coreconfigitem(
    b'ui',
    b'merge',
    default=None,
)
coreconfigitem(
    b'ui',
    b'mergemarkers',
    default=b'basic',
)
coreconfigitem(
    b'ui',
    b'message-output',
    default=b'stdio',
)
coreconfigitem(
    b'ui',
    b'nontty',
    default=False,
)
coreconfigitem(
    b'ui',
    b'origbackuppath',
    default=None,
)
coreconfigitem(
    b'ui',
    b'paginate',
    default=True,
)
coreconfigitem(
    b'ui',
    b'patch',
    default=None,
)
coreconfigitem(
    b'ui',
    b'portablefilenames',
    default=b'warn',
)
coreconfigitem(
    b'ui',
    b'promptecho',
    default=False,
)
coreconfigitem(
    b'ui',
    b'quiet',
    default=False,
)
coreconfigitem(
    b'ui',
    b'quietbookmarkmove',
    default=False,
)
coreconfigitem(
    b'ui',
    b'relative-paths',
    default=b'legacy',
)
coreconfigitem(
    b'ui',
    b'remotecmd',
    default=b'hg',
)
coreconfigitem(
    b'ui',
    b'report_untrusted',
    default=True,
)
coreconfigitem(
    b'ui',
    b'rollback',
    default=True,
)
coreconfigitem(
    b'ui',
    b'signal-safe-lock',
    default=True,
)
coreconfigitem(
    b'ui',
    b'slash',
    default=False,
)
coreconfigitem(
    b'ui',
    b'ssh',
    default=b'ssh',
)
coreconfigitem(
    b'ui',
    b'ssherrorhint',
    default=None,
)
coreconfigitem(
    b'ui',
    b'statuscopies',
    default=False,
)
coreconfigitem(
    b'ui',
    b'strict',
    default=False,
)
coreconfigitem(
    b'ui',
    b'style',
    default=b'',
)
coreconfigitem(
    b'ui',
    b'supportcontact',
    default=None,
)
coreconfigitem(
    b'ui',
    b'textwidth',
    default=78,
)
coreconfigitem(
    b'ui',
    b'timeout',
    default=b'600',
)
coreconfigitem(
    b'ui',
    b'timeout.warn',
    default=0,
)
coreconfigitem(
    b'ui',
    b'timestamp-output',
    default=False,
)
coreconfigitem(
    b'ui',
    b'traceback',
    default=False,
)
coreconfigitem(
    b'ui',
    b'tweakdefaults',
    default=False,
)
coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
coreconfigitem(
    b'ui',
    b'verbose',
    default=False,
)
coreconfigitem(
    b'verify',
    b'skipflags',
    default=None,
)
coreconfigitem(
    b'web',
    b'allowbz2',
    default=False,
)
coreconfigitem(
    b'web',
    b'allowgz',
    default=False,
)
coreconfigitem(
    b'web',
    b'allow-pull',
    alias=[(b'web', b'allowpull')],
    default=True,
)
coreconfigitem(
    b'web',
    b'allow-push',
    alias=[(b'web', b'allow_push')],
    default=list,
)
coreconfigitem(
    b'web',
    b'allowzip',
    default=False,
)
coreconfigitem(
    b'web',
    b'archivesubrepos',
    default=False,
)
coreconfigitem(
    b'web',
    b'cache',
    default=True,
)
coreconfigitem(
    b'web',
    b'comparisoncontext',
    default=5,
)
coreconfigitem(
    b'web',
    b'contact',
    default=None,
)
coreconfigitem(
    b'web',
    b'deny_push',
    default=list,
)
coreconfigitem(
    b'web',
    b'guessmime',
    default=False,
)
coreconfigitem(
    b'web',
    b'hidden',
    default=False,
)
coreconfigitem(
    b'web',
    b'labels',
    default=list,
)
coreconfigitem(
    b'web',
    b'logoimg',
    default=b'hglogo.png',
)
coreconfigitem(
    b'web',
    b'logourl',
    default=b'https://mercurial-scm.org/',
)
coreconfigitem(
    b'web',
    b'accesslog',
    default=b'-',
)
coreconfigitem(
    b'web',
    b'address',
    default=b'',
)
coreconfigitem(
    b'web',
    b'allow-archive',
    alias=[(b'web', b'allow_archive')],
    default=list,
)
coreconfigitem(
    b'web',
    b'allow_read',
    default=list,
)
coreconfigitem(
    b'web',
    b'baseurl',
    default=None,
)
coreconfigitem(
    b'web',
    b'cacerts',
    default=None,
)
coreconfigitem(
    b'web',
    b'certificate',
    default=None,
)
coreconfigitem(
    b'web',
    b'collapse',
    default=False,
)
coreconfigitem(
    b'web',
    b'csp',
    default=None,
)
coreconfigitem(
    b'web',
    b'deny_read',
    default=list,
)
coreconfigitem(
    b'web',
    b'descend',
    default=True,
)
coreconfigitem(
    b'web',
    b'description',
    default=b"",
)
coreconfigitem(
    b'web',
    b'encoding',
    default=lambda: encoding.encoding,
)
coreconfigitem(
    b'web',
    b'errorlog',
    default=b'-',
)
coreconfigitem(
    b'web',
    b'ipv6',
    default=False,
)
coreconfigitem(
    b'web',
    b'maxchanges',
    default=10,
)
coreconfigitem(
    b'web',
    b'maxfiles',
    default=10,
)
coreconfigitem(
    b'web',
    b'maxshortchanges',
    default=60,
)
coreconfigitem(
    b'web',
    b'motd',
    default=b'',
)
coreconfigitem(
    b'web',
    b'name',
    default=dynamicdefault,
)
coreconfigitem(
    b'web',
    b'port',
    default=8000,
)
coreconfigitem(
    b'web',
    b'prefix',
    default=b'',
)
coreconfigitem(
    b'web',
    b'push_ssl',
    default=True,
)
coreconfigitem(
    b'web',
    b'refreshinterval',
    default=20,
)
coreconfigitem(
    b'web',
    b'server-header',
    default=None,
)
coreconfigitem(
    b'web',
    b'static',
    default=None,
)
coreconfigitem(
    b'web',
    b'staticurl',
    default=None,
)
coreconfigitem(
    b'web',
    b'stripes',
    default=1,
)
coreconfigitem(
    b'web',
    b'style',
    default=b'paper',
)
coreconfigitem(
    b'web',
    b'templates',
    default=None,
)
coreconfigitem(
    b'web',
    b'view',
    default=b'served',
    experimental=True,
)
coreconfigitem(
    b'worker',
    b'backgroundclose',
    default=dynamicdefault,
)
# Windows defaults to a limit of 512 open files. A buffer of 128
# should give us enough headway (hence the default of 512 - 128 = 384).
coreconfigitem(
    b'worker',
    b'backgroundclosemaxqueue',
    default=384,
)
coreconfigitem(
    b'worker',
    b'backgroundcloseminfilecount',
    default=2048,
)
coreconfigitem(
    b'worker',
    b'backgroundclosethreadcount',
    default=4,
)
coreconfigitem(
    b'worker',
    b'enabled',
    default=True,
)
coreconfigitem(
    b'worker',
    b'numcpus',
    default=None,
)

# Rebase related configuration moved to core because other extensions are
# doing strange things. For example, shelve imports the extension to reuse
# some bits without formally loading it.
coreconfigitem(
    b'commands',
    b'rebase.requiredest',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'rebaseskipobsolete',
    default=True,
)
coreconfigitem(
    b'rebase',
    b'singletransaction',
    default=False,
)
coreconfigitem(
    b'rebase',
    b'experimental.inmemory',
    default=False,
)
@@ -1,3939 +1,3940 b''
# localrepo.py - read/write repository class for mercurial
# coding: utf-8
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import errno
import functools
import os
import random
import sys
import time
import weakref

from concurrent import futures
from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
    wireprototypes,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    concurrency_checker as revlogchecker,
    constants as revlogconst,
    sidedata as sidedatamod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered"""

109 def __get__(self, repo, type=None):
109 def __get__(self, repo, type=None):
110 if repo is None:
110 if repo is None:
111 return self
111 return self
112 # proxy to unfiltered __dict__ since filtered repo has no entry
112 # proxy to unfiltered __dict__ since filtered repo has no entry
113 unfi = repo.unfiltered()
113 unfi = repo.unfiltered()
114 try:
114 try:
115 return unfi.__dict__[self.sname]
115 return unfi.__dict__[self.sname]
116 except KeyError:
116 except KeyError:
117 pass
117 pass
118 return super(_basefilecache, self).__get__(unfi, type)
118 return super(_basefilecache, self).__get__(unfi, type)
119
119
120 def set(self, repo, value):
120 def set(self, repo, value):
121 return super(_basefilecache, self).set(repo.unfiltered(), value)
121 return super(_basefilecache, self).set(repo.unfiltered(), value)
122
122
123
123
124 class repofilecache(_basefilecache):
124 class repofilecache(_basefilecache):
125 """filecache for files in .hg but outside of .hg/store"""
125 """filecache for files in .hg but outside of .hg/store"""
126
126
127 def __init__(self, *paths):
127 def __init__(self, *paths):
128 super(repofilecache, self).__init__(*paths)
128 super(repofilecache, self).__init__(*paths)
129 for path in paths:
129 for path in paths:
130 _cachedfiles.add((path, b'plain'))
130 _cachedfiles.add((path, b'plain'))
131
131
132 def join(self, obj, fname):
132 def join(self, obj, fname):
133 return obj.vfs.join(fname)
133 return obj.vfs.join(fname)
134
134
135
135
136 class storecache(_basefilecache):
136 class storecache(_basefilecache):
137 """filecache for files in the store"""
137 """filecache for files in the store"""
138
138
139 def __init__(self, *paths):
139 def __init__(self, *paths):
140 super(storecache, self).__init__(*paths)
140 super(storecache, self).__init__(*paths)
141 for path in paths:
141 for path in paths:
142 _cachedfiles.add((path, b''))
142 _cachedfiles.add((path, b''))
143
143
144 def join(self, obj, fname):
144 def join(self, obj, fname):
145 return obj.sjoin(fname)
145 return obj.sjoin(fname)
146
146
147
147
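
# A minimal usage sketch (illustrative only, not part of this module's API):
# the filecache decorators above are meant to wrap properties on the
# repository class so that the cached value is invalidated when the backing
# file changes on disk. The property name and body below are assumptions
# chosen for demonstration, not a definitive implementation.
def _example_filecache_usage():
    class examplerepo(localrepository):
        @repofilecache(b'bookmarks')
        def _bookmarks(self):
            # recomputed only when .hg/bookmarks changes on disk
            return bookmarks.bmstore(self)

    return examplerepo
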
class changelogcache(storecache):
    """filecache for the changelog"""

    def __init__(self):
        super(changelogcache, self).__init__()
        _cachedfiles.add((b'00changelog.i', b''))
        _cachedfiles.add((b'00changelog.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00changelog.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00changelog.n'))
        return paths


class manifestlogcache(storecache):
    """filecache for the manifestlog"""

    def __init__(self):
        super(manifestlogcache, self).__init__()
        _cachedfiles.add((b'00manifest.i', b''))
        _cachedfiles.add((b'00manifest.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00manifest.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00manifest.n'))
        return paths


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)
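
# Illustrative sketch only: a mixed cache is declared with explicit
# (path, location) pairs, mixing b'plain' (.hg/-relative) and b'' (store-
# relative) locations. The property name below is hypothetical:
#
#     @mixedrepostorecache((b'bookmarks', b'plain'), (b'00changelog.i', b''))
#     def _somecache(self):
#         ...
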
def isfilecached(repo, name):
    """check if a repo already has the "name" filecache-ed property cached

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True
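
def _example_isfilecached(repo):
    # A minimal sketch: peek at the b'changelog' filecache entry without
    # forcing it to be (re)loaded. The property name used here is an
    # assumption for illustration.
    obj, cached = isfilecached(repo, b'changelog')
    return obj if cached else None
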
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper
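
# Illustrative sketch: methods that mutate history typically cannot operate
# on a filtered view, so they are declared roughly like this. The method
# name below is a hypothetical example, not part of this module:
def _example_unfilteredmethod():
    class examplerepo(localrepository):
        @unfilteredmethod
        def _rebuildcaches(self):
            # `self` is guaranteed to be the unfiltered repo here
            pass

    return examplerepo
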
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor:
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True
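
def _example_commandexecutor(peer):
    # A minimal sketch of the executor protocol (b'tip' is an arbitrary
    # example key): queue a command, flush the batch, then read the future.
    # For this local executor the future is already resolved by the time
    # callcommand() returns.
    with peer.commandexecutor() as e:
        f = e.callcommand(b'lookup', {b'key': b'tip'})
        e.sendcommands()
    return f.result()
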
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui

        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
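
def _example_featuresetup():
    # A minimal sketch of how an extension advertises support for its own
    # requirement via featuresetupfuncs (the requirement name below is made
    # up for illustration):
    def featuresetup(ui, supported):
        supported.add(b'exp-example-requirement')

    featuresetupfuncs.add(featuresetup)
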
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is the vfs pointing at .hg/ of the current repo (the shared one)
    requirements is a set of requirements of the current repo (the shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the requires file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress ENOENT if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(vfs.read(b'requires').splitlines())
    except IOError as e:
        if not (allowmissing and e.errno == errno.ENOENT):
            raise
        requirements = set()
    return requirements
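
# For reference, a typical modern ``.hg/requires`` file read by
# _readrequires() looks something like this (the exact set varies with
# Mercurial version and configuration):
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     sparserevlog
#     store
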
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, the vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
    # requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
    # if store is not present, refer to checkrequirementscompat() for that
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
    hint = _(b"see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:

        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if (
        requirementsmod.REVLOGV2_REQUIREMENT in requirements
        or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
    ):
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced a race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is the vfs object pointing to the source repo if the current
    one is a shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from the shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret
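
def _example_wrap_loadhgrc():
    # A sketch of the monkeypatching mentioned in loadhgrc()'s docstring,
    # as an extension might do in its uisetup(): pull in one extra config
    # file after the normal ones (the b'hgrc-extra' name is purely
    # illustrative).
    from mercurial import extensions as ext, localrepo

    def wrapped(orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
        ret = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
        try:
            ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
            ret = True
        except IOError:
            pass
        return ret

    ext.wrapfunction(localrepo, 'loadhgrc', wrapped)
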
889 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
889 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
890 """Perform additional actions after .hg/hgrc is loaded.
890 """Perform additional actions after .hg/hgrc is loaded.
891
891
892 This function is called during repository loading immediately after
892 This function is called during repository loading immediately after
893 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
893 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
894
894
895 The function can be used to validate configs, automatically add
895 The function can be used to validate configs, automatically add
896 options (including extensions) based on requirements, etc.
896 options (including extensions) based on requirements, etc.
897 """
897 """
898
898
899 # Map of requirements to list of extensions to load automatically when
899 # Map of requirements to list of extensions to load automatically when
900 # requirement is present.
900 # requirement is present.
901 autoextensions = {
901 autoextensions = {
902 b'git': [b'git'],
902 b'git': [b'git'],
903 b'largefiles': [b'largefiles'],
903 b'largefiles': [b'largefiles'],
904 b'lfs': [b'lfs'],
904 b'lfs': [b'lfs'],
905 }
905 }
906
906
907 for requirement, names in sorted(autoextensions.items()):
907 for requirement, names in sorted(autoextensions.items()):
908 if requirement not in requirements:
908 if requirement not in requirements:
909 continue
909 continue
910
910
911 for name in names:
911 for name in names:
912 if not ui.hasconfig(b'extensions', name):
912 if not ui.hasconfig(b'extensions', name):
913 ui.setconfig(b'extensions', name, b'', source=b'autoload')
913 ui.setconfig(b'extensions', name, b'', source=b'autoload')
914
914
915
915
916 def gathersupportedrequirements(ui):
916 def gathersupportedrequirements(ui):
917 """Determine the complete set of recognized requirements."""
917 """Determine the complete set of recognized requirements."""
918 # Start with all requirements supported by this file.
918 # Start with all requirements supported by this file.
919 supported = set(localrepository._basesupported)
919 supported = set(localrepository._basesupported)
920
920
921 # Execute ``featuresetupfuncs`` entries if they belong to an extension
921 # Execute ``featuresetupfuncs`` entries if they belong to an extension
922 # relevant to this ui instance.
922 # relevant to this ui instance.
923 modules = {m.__name__ for n, m in extensions.extensions(ui)}
923 modules = {m.__name__ for n, m in extensions.extensions(ui)}
924
924
925 for fn in featuresetupfuncs:
925 for fn in featuresetupfuncs:
926 if fn.__module__ in modules:
926 if fn.__module__ in modules:
927 fn(ui, supported)
927 fn(ui, supported)
928
928
929 # Add derived requirements from registered compression engines.
929 # Add derived requirements from registered compression engines.
930 for name in util.compengines:
930 for name in util.compengines:
931 engine = util.compengines[name]
931 engine = util.compengines[name]
932 if engine.available() and engine.revlogheader():
932 if engine.available() and engine.revlogheader():
933 supported.add(b'exp-compression-%s' % name)
933 supported.add(b'exp-compression-%s' % name)
934 if engine.name() == b'zstd':
934 if engine.name() == b'zstd':
935 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
935 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
936
936
937 return supported
937 return supported
938
938
939
939
940 def ensurerequirementsrecognized(requirements, supported):
940 def ensurerequirementsrecognized(requirements, supported):
941 """Validate that a set of local requirements is recognized.
941 """Validate that a set of local requirements is recognized.
942
942
943 Receives a set of requirements. Raises an ``error.RepoError`` if there
943 Receives a set of requirements. Raises an ``error.RepoError`` if there
944 exists any requirement in that set that currently loaded code doesn't
944 exists any requirement in that set that currently loaded code doesn't
945 recognize.
945 recognize.
946
946
947 Returns a set of supported requirements.
947 Returns a set of supported requirements.
948 """
948 """
949 missing = set()
949 missing = set()
950
950
951 for requirement in requirements:
951 for requirement in requirements:
952 if requirement in supported:
952 if requirement in supported:
953 continue
953 continue
954
954
955 if not requirement or not requirement[0:1].isalnum():
955 if not requirement or not requirement[0:1].isalnum():
956 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
956 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
957
957
958 missing.add(requirement)
958 missing.add(requirement)
959
959
960 if missing:
960 if missing:
961 raise error.RequirementError(
961 raise error.RequirementError(
962 _(b'repository requires features unknown to this Mercurial: %s')
962 _(b'repository requires features unknown to this Mercurial: %s')
963 % b' '.join(sorted(missing)),
963 % b' '.join(sorted(missing)),
964 hint=_(
964 hint=_(
965 b'see https://mercurial-scm.org/wiki/MissingRequirement '
965 b'see https://mercurial-scm.org/wiki/MissingRequirement '
966 b'for more information'
966 b'for more information'
967 ),
967 ),
968 )
968 )
969
969
970
970
971 def ensurerequirementscompatible(ui, requirements):
971 def ensurerequirementscompatible(ui, requirements):
972 """Validates that a set of recognized requirements is mutually compatible.
972 """Validates that a set of recognized requirements is mutually compatible.
973
973
974 Some requirements may not be compatible with others or require
974 Some requirements may not be compatible with others or require
975 config options that aren't enabled. This function is called during
975 config options that aren't enabled. This function is called during
976 repository opening to ensure that the set of requirements needed
976 repository opening to ensure that the set of requirements needed
977 to open a repository is sane and compatible with config options.
977 to open a repository is sane and compatible with config options.
978
978
979 Extensions can monkeypatch this function to perform additional
979 Extensions can monkeypatch this function to perform additional
980 checking.
980 checking.
981
981
982 ``error.RepoError`` should be raised on failure.
982 ``error.RepoError`` should be raised on failure.
983 """
983 """
984 if (
984 if (
985 requirementsmod.SPARSE_REQUIREMENT in requirements
985 requirementsmod.SPARSE_REQUIREMENT in requirements
986 and not sparse.enabled
986 and not sparse.enabled
987 ):
987 ):
988 raise error.RepoError(
988 raise error.RepoError(
989 _(
989 _(
990 b'repository is using sparse feature but '
990 b'repository is using sparse feature but '
991 b'sparse is not enabled; enable the '
991 b'sparse is not enabled; enable the '
992 b'"sparse" extensions to access'
992 b'"sparse" extensions to access'
993 )
993 )
994 )
994 )
995
995
996
996
997 def makestore(requirements, path, vfstype):
997 def makestore(requirements, path, vfstype):
998 """Construct a storage object for a repository."""
998 """Construct a storage object for a repository."""
999 if requirementsmod.STORE_REQUIREMENT in requirements:
999 if requirementsmod.STORE_REQUIREMENT in requirements:
1000 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1000 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1001 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1001 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1002 return storemod.fncachestore(path, vfstype, dotencode)
1002 return storemod.fncachestore(path, vfstype, dotencode)
1003
1003
1004 return storemod.encodedstore(path, vfstype)
1004 return storemod.encodedstore(path, vfstype)
1005
1005
1006 return storemod.basicstore(path, vfstype)
1006 return storemod.basicstore(path, vfstype)
1007
1007
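# Store selection above, summarized (descriptive comment, not code):
#
#   'store' + 'fncache' requirements -> fncachestore
#       (dotencode toggled by the 'dotencode' requirement)
#   'store' requirement only         -> encodedstore
#   neither (very old repositories)  -> basicstore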
1008
1008
1009 def resolvestorevfsoptions(ui, requirements, features):
1009 def resolvestorevfsoptions(ui, requirements, features):
1010 """Resolve the options to pass to the store vfs opener.
1010 """Resolve the options to pass to the store vfs opener.
1011
1011
1012 The returned dict is used to influence behavior of the storage layer.
1012 The returned dict is used to influence behavior of the storage layer.
1013 """
1013 """
1014 options = {}
1014 options = {}
1015
1015
1016 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1016 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1017 options[b'treemanifest'] = True
1017 options[b'treemanifest'] = True
1018
1018
1019 # experimental config: format.manifestcachesize
1019 # experimental config: format.manifestcachesize
1020 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1020 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1021 if manifestcachesize is not None:
1021 if manifestcachesize is not None:
1022 options[b'manifestcachesize'] = manifestcachesize
1022 options[b'manifestcachesize'] = manifestcachesize
1023
1023
1024 # In the absence of another requirement superseding a revlog-related
1024 # In the absence of another requirement superseding a revlog-related
1025 # requirement, we have to assume the repo is using revlog version 0.
1025 # requirement, we have to assume the repo is using revlog version 0.
1026 # This revlog format is super old and we don't bother trying to parse
1026 # This revlog format is super old and we don't bother trying to parse
1027 # opener options for it because those options wouldn't do anything
1027 # opener options for it because those options wouldn't do anything
1028 # meaningful on such old repos.
1028 # meaningful on such old repos.
1029 if (
1029 if (
1030 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1030 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1031 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1031 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1032 ):
1032 ):
1033 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1033 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1034 else: # explicitly mark repo as using revlogv0
1034 else: # explicitly mark repo as using revlogv0
1035 options[b'revlogv0'] = True
1035 options[b'revlogv0'] = True
1036
1036
1037 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1037 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1038 options[b'copies-storage'] = b'changeset-sidedata'
1038 options[b'copies-storage'] = b'changeset-sidedata'
1039 else:
1039 else:
1040 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1040 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1041 copiesextramode = (b'changeset-only', b'compatibility')
1041 copiesextramode = (b'changeset-only', b'compatibility')
1042 if writecopiesto in copiesextramode:
1042 if writecopiesto in copiesextramode:
1043 options[b'copies-storage'] = b'extra'
1043 options[b'copies-storage'] = b'extra'
1044
1044
1045 return options
1045 return options
1046
1046
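# Illustrative sketch of a result (keys are only set when the matching
# requirement or config is present; values below assume a modern repository
# storing copies in changeset sidedata):
#
#   {b'revlogv1': True, b'generaldelta': True, ...,
#    b'copies-storage': b'changeset-sidedata'}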
1047
1047
1048 def resolverevlogstorevfsoptions(ui, requirements, features):
1048 def resolverevlogstorevfsoptions(ui, requirements, features):
1049 """Resolve opener options specific to revlogs."""
1049 """Resolve opener options specific to revlogs."""
1050
1050
1051 options = {}
1051 options = {}
1052 options[b'flagprocessors'] = {}
1052 options[b'flagprocessors'] = {}
1053
1053
1054 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1054 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1055 options[b'revlogv1'] = True
1055 options[b'revlogv1'] = True
1056 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1056 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1057 options[b'revlogv2'] = True
1057 options[b'revlogv2'] = True
1058 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1058 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1059 options[b'changelogv2'] = True
1059 options[b'changelogv2'] = True
1060
1060
1061 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1061 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1062 options[b'generaldelta'] = True
1062 options[b'generaldelta'] = True
1063
1063
1064 # experimental config: format.chunkcachesize
1064 # experimental config: format.chunkcachesize
1065 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1065 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1066 if chunkcachesize is not None:
1066 if chunkcachesize is not None:
1067 options[b'chunkcachesize'] = chunkcachesize
1067 options[b'chunkcachesize'] = chunkcachesize
1068
1068
1069 deltabothparents = ui.configbool(
1069 deltabothparents = ui.configbool(
1070 b'storage', b'revlog.optimize-delta-parent-choice'
1070 b'storage', b'revlog.optimize-delta-parent-choice'
1071 )
1071 )
1072 options[b'deltabothparents'] = deltabothparents
1072 options[b'deltabothparents'] = deltabothparents
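# `debug.revlog.debug-delta` is the option introduced by this changeset;
# per the changeset description, it enables debug output about delta
# computation.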
1073 options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
1073
1074
1074 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1075 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1075 options[b'issue6528.fix-incoming'] = issue6528
1076 options[b'issue6528.fix-incoming'] = issue6528
1076
1077
1077 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1078 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1078 lazydeltabase = False
1079 lazydeltabase = False
1079 if lazydelta:
1080 if lazydelta:
1080 lazydeltabase = ui.configbool(
1081 lazydeltabase = ui.configbool(
1081 b'storage', b'revlog.reuse-external-delta-parent'
1082 b'storage', b'revlog.reuse-external-delta-parent'
1082 )
1083 )
1083 if lazydeltabase is None:
1084 if lazydeltabase is None:
1084 lazydeltabase = not scmutil.gddeltaconfig(ui)
1085 lazydeltabase = not scmutil.gddeltaconfig(ui)
1085 options[b'lazydelta'] = lazydelta
1086 options[b'lazydelta'] = lazydelta
1086 options[b'lazydeltabase'] = lazydeltabase
1087 options[b'lazydeltabase'] = lazydeltabase
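# Net effect (descriptive summary): externally supplied deltas are only
# considered for reuse when `storage.revlog.reuse-external-delta` is set,
# and the delta *parent* is additionally reused only when
# `revlog.reuse-external-delta-parent` allows it; when that option is
# unset, the fallback depends on the general-delta configuration via
# scmutil.gddeltaconfig above.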
1087
1088
1088 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1089 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1089 if 0 <= chainspan:
1090 if 0 <= chainspan:
1090 options[b'maxdeltachainspan'] = chainspan
1091 options[b'maxdeltachainspan'] = chainspan
1091
1092
1092 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1093 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1093 if mmapindexthreshold is not None:
1094 if mmapindexthreshold is not None:
1094 options[b'mmapindexthreshold'] = mmapindexthreshold
1095 options[b'mmapindexthreshold'] = mmapindexthreshold
1095
1096
1096 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1097 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1097 srdensitythres = float(
1098 srdensitythres = float(
1098 ui.config(b'experimental', b'sparse-read.density-threshold')
1099 ui.config(b'experimental', b'sparse-read.density-threshold')
1099 )
1100 )
1100 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1101 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1101 options[b'with-sparse-read'] = withsparseread
1102 options[b'with-sparse-read'] = withsparseread
1102 options[b'sparse-read-density-threshold'] = srdensitythres
1103 options[b'sparse-read-density-threshold'] = srdensitythres
1103 options[b'sparse-read-min-gap-size'] = srmingapsize
1104 options[b'sparse-read-min-gap-size'] = srmingapsize
1104
1105
1105 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1106 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1106 options[b'sparse-revlog'] = sparserevlog
1107 options[b'sparse-revlog'] = sparserevlog
1107 if sparserevlog:
1108 if sparserevlog:
1108 options[b'generaldelta'] = True
1109 options[b'generaldelta'] = True
1109
1110
1110 maxchainlen = None
1111 maxchainlen = None
1111 if sparserevlog:
1112 if sparserevlog:
1112 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1113 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1113 # experimental config: format.maxchainlen
1114 # experimental config: format.maxchainlen
1114 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1115 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1115 if maxchainlen is not None:
1116 if maxchainlen is not None:
1116 options[b'maxchainlen'] = maxchainlen
1117 options[b'maxchainlen'] = maxchainlen
1117
1118
1118 for r in requirements:
1119 for r in requirements:
1119 # we allow multiple compression engine requirements to co-exist because,
1120 # we allow multiple compression engine requirements to co-exist because,
1120 # strictly speaking, revlog seems to support mixed compression styles.
1121 # strictly speaking, revlog seems to support mixed compression styles.
1121 #
1122 #
1122 # The compression used for new entries will be "the last one"
1123 # The compression used for new entries will be "the last one"
1123 prefix = r.startswith
1124 prefix = r.startswith
1124 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1125 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1125 options[b'compengine'] = r.split(b'-', 2)[2]
1126 options[b'compengine'] = r.split(b'-', 2)[2]
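# e.g. b'revlog-compression-zstd'.split(b'-', 2) yields
# [b'revlog', b'compression', b'zstd'], so index [2] is the engine name.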
1126
1127
1127 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1128 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1128 if options[b'zlib.level'] is not None:
1129 if options[b'zlib.level'] is not None:
1129 if not (0 <= options[b'zlib.level'] <= 9):
1130 if not (0 <= options[b'zlib.level'] <= 9):
1130 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1131 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1131 raise error.Abort(msg % options[b'zlib.level'])
1132 raise error.Abort(msg % options[b'zlib.level'])
1132 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1133 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1133 if options[b'zstd.level'] is not None:
1134 if options[b'zstd.level'] is not None:
1134 if not (0 <= options[b'zstd.level'] <= 22):
1135 if not (0 <= options[b'zstd.level'] <= 22):
1135 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1136 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1136 raise error.Abort(msg % options[b'zstd.level'])
1137 raise error.Abort(msg % options[b'zstd.level'])
1137
1138
1138 if requirementsmod.NARROW_REQUIREMENT in requirements:
1139 if requirementsmod.NARROW_REQUIREMENT in requirements:
1139 options[b'enableellipsis'] = True
1140 options[b'enableellipsis'] = True
1140
1141
1141 if ui.configbool(b'experimental', b'rust.index'):
1142 if ui.configbool(b'experimental', b'rust.index'):
1142 options[b'rust.index'] = True
1143 options[b'rust.index'] = True
1143 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1144 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1144 slow_path = ui.config(
1145 slow_path = ui.config(
1145 b'storage', b'revlog.persistent-nodemap.slow-path'
1146 b'storage', b'revlog.persistent-nodemap.slow-path'
1146 )
1147 )
1147 if slow_path not in (b'allow', b'warn', b'abort'):
1148 if slow_path not in (b'allow', b'warn', b'abort'):
1148 default = ui.config_default(
1149 default = ui.config_default(
1149 b'storage', b'revlog.persistent-nodemap.slow-path'
1150 b'storage', b'revlog.persistent-nodemap.slow-path'
1150 )
1151 )
1151 msg = _(
1152 msg = _(
1152 b'unknown value for config '
1153 b'unknown value for config '
1153 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1154 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1154 )
1155 )
1155 ui.warn(msg % slow_path)
1156 ui.warn(msg % slow_path)
1156 if not ui.quiet:
1157 if not ui.quiet:
1157 ui.warn(_(b'falling back to default value: %s\n') % default)
1158 ui.warn(_(b'falling back to default value: %s\n') % default)
1158 slow_path = default
1159 slow_path = default
1159
1160
1160 msg = _(
1161 msg = _(
1161 b"accessing `persistent-nodemap` repository without associated "
1162 b"accessing `persistent-nodemap` repository without associated "
1162 b"fast implementation."
1163 b"fast implementation."
1163 )
1164 )
1164 hint = _(
1165 hint = _(
1165 b"check `hg help config.format.use-persistent-nodemap` "
1166 b"check `hg help config.format.use-persistent-nodemap` "
1166 b"for details"
1167 b"for details"
1167 )
1168 )
1168 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1169 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1169 if slow_path == b'warn':
1170 if slow_path == b'warn':
1170 msg = b"warning: " + msg + b'\n'
1171 msg = b"warning: " + msg + b'\n'
1171 ui.warn(msg)
1172 ui.warn(msg)
1172 if not ui.quiet:
1173 if not ui.quiet:
1173 hint = b'(' + hint + b')\n'
1174 hint = b'(' + hint + b')\n'
1174 ui.warn(hint)
1175 ui.warn(hint)
1175 if slow_path == b'abort':
1176 if slow_path == b'abort':
1176 raise error.Abort(msg, hint=hint)
1177 raise error.Abort(msg, hint=hint)
1177 options[b'persistent-nodemap'] = True
1178 options[b'persistent-nodemap'] = True
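# slow-path values, summarized: b'allow' proceeds silently, b'warn' proceeds
# after printing the message (plus the hint unless --quiet), and b'abort'
# refuses to open the repository.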
1178 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1179 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1179 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1180 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1180 if slow_path not in (b'allow', b'warn', b'abort'):
1181 if slow_path not in (b'allow', b'warn', b'abort'):
1181 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1182 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1182 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1183 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1183 ui.warn(msg % slow_path)
1184 ui.warn(msg % slow_path)
1184 if not ui.quiet:
1185 if not ui.quiet:
1185 ui.warn(_(b'falling back to default value: %s\n') % default)
1186 ui.warn(_(b'falling back to default value: %s\n') % default)
1186 slow_path = default
1187 slow_path = default
1187
1188
1188 msg = _(
1189 msg = _(
1189 b"accessing `dirstate-v2` repository without associated "
1190 b"accessing `dirstate-v2` repository without associated "
1190 b"fast implementation."
1191 b"fast implementation."
1191 )
1192 )
1192 hint = _(
1193 hint = _(
1193 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1194 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1194 )
1195 )
1195 if not dirstate.HAS_FAST_DIRSTATE_V2:
1196 if not dirstate.HAS_FAST_DIRSTATE_V2:
1196 if slow_path == b'warn':
1197 if slow_path == b'warn':
1197 msg = b"warning: " + msg + b'\n'
1198 msg = b"warning: " + msg + b'\n'
1198 ui.warn(msg)
1199 ui.warn(msg)
1199 if not ui.quiet:
1200 if not ui.quiet:
1200 hint = b'(' + hint + b')\n'
1201 hint = b'(' + hint + b')\n'
1201 ui.warn(hint)
1202 ui.warn(hint)
1202 if slow_path == b'abort':
1203 if slow_path == b'abort':
1203 raise error.Abort(msg, hint=hint)
1204 raise error.Abort(msg, hint=hint)
1204 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1205 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1205 options[b'persistent-nodemap.mmap'] = True
1206 options[b'persistent-nodemap.mmap'] = True
1206 if ui.configbool(b'devel', b'persistent-nodemap'):
1207 if ui.configbool(b'devel', b'persistent-nodemap'):
1207 options[b'devel-force-nodemap'] = True
1208 options[b'devel-force-nodemap'] = True
1208
1209
1209 return options
1210 return options
1210
1211
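# Illustrative sketch (not exhaustive): for requirements containing
# {b'revlogv1', b'generaldelta', b'sparserevlog'} and default config, the
# returned dict would include b'revlogv1': True, b'generaldelta': True,
# b'sparse-revlog': True, and a b'maxchainlen' defaulting to
# revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH.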
1211
1212
1212 def makemain(**kwargs):
1213 def makemain(**kwargs):
1213 """Produce a type conforming to ``ilocalrepositorymain``."""
1214 """Produce a type conforming to ``ilocalrepositorymain``."""
1214 return localrepository
1215 return localrepository
1215
1216
1216
1217
1217 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1218 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1218 class revlogfilestorage:
1219 class revlogfilestorage:
1219 """File storage when using revlogs."""
1220 """File storage when using revlogs."""
1220
1221
1221 def file(self, path):
1222 def file(self, path):
1222 if path.startswith(b'/'):
1223 if path.startswith(b'/'):
1223 path = path[1:]
1224 path = path[1:]
1224
1225
1225 return filelog.filelog(self.svfs, path)
1226 return filelog.filelog(self.svfs, path)
1226
1227
1227
1228
1228 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1229 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1229 class revlognarrowfilestorage:
1230 class revlognarrowfilestorage:
1230 """File storage when using revlogs and narrow files."""
1231 """File storage when using revlogs and narrow files."""
1231
1232
1232 def file(self, path):
1233 def file(self, path):
1233 if path.startswith(b'/'):
1234 if path.startswith(b'/'):
1234 path = path[1:]
1235 path = path[1:]
1235
1236
1236 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1237 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1237
1238
1238
1239
1239 def makefilestorage(requirements, features, **kwargs):
1240 def makefilestorage(requirements, features, **kwargs):
1240 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1241 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1241 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1242 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1242 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1243 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1243
1244
1244 if requirementsmod.NARROW_REQUIREMENT in requirements:
1245 if requirementsmod.NARROW_REQUIREMENT in requirements:
1245 return revlognarrowfilestorage
1246 return revlognarrowfilestorage
1246 else:
1247 else:
1247 return revlogfilestorage
1248 return revlogfilestorage
1248
1249
1249
1250
1250 # List of repository interfaces and factory functions for them. Each
1251 # List of repository interfaces and factory functions for them. Each
1251 # will be called in order during ``makelocalrepository()`` to iteratively
1252 # will be called in order during ``makelocalrepository()`` to iteratively
1252 # derive the final type for a local repository instance. We capture the
1253 # derive the final type for a local repository instance. We capture the
1253 # function as a lambda so we don't hold a reference and the module-level
1254 # function as a lambda so we don't hold a reference and the module-level
1254 # functions can be wrapped.
1255 # functions can be wrapped.
1255 REPO_INTERFACES = [
1256 REPO_INTERFACES = [
1256 (repository.ilocalrepositorymain, lambda: makemain),
1257 (repository.ilocalrepositorymain, lambda: makemain),
1257 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1258 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1258 ]
1259 ]
1259
1260
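# Simplified sketch of the derivation performed by ``makelocalrepository()``
# (not the actual code; most argument plumbing omitted):
#
#   bases = [factory()(requirements=requirements, features=features)
#            for _iface, factory in REPO_INTERFACES]
#   repotype = type('derivedrepo', tuple(bases), {})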
1260
1261
1261 @interfaceutil.implementer(repository.ilocalrepositorymain)
1262 @interfaceutil.implementer(repository.ilocalrepositorymain)
1262 class localrepository:
1263 class localrepository:
1263 """Main class for representing local repositories.
1264 """Main class for representing local repositories.
1264
1265
1265 All local repositories are instances of this class.
1266 All local repositories are instances of this class.
1266
1267
1267 Constructed on its own, instances of this class are not usable as
1268 Constructed on its own, instances of this class are not usable as
1268 repository objects. To obtain a usable repository object, call
1269 repository objects. To obtain a usable repository object, call
1269 ``hg.repository()``, ``localrepo.instance()``, or
1270 ``hg.repository()``, ``localrepo.instance()``, or
1270 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1271 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1271 ``instance()`` adds support for creating new repositories.
1272 ``instance()`` adds support for creating new repositories.
1272 ``hg.repository()`` adds more extension integration, including calling
1273 ``hg.repository()`` adds more extension integration, including calling
1273 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1274 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1274 used.
1275 used.
1275 """
1276 """
1276
1277
1277 _basesupported = {
1278 _basesupported = {
1278 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1279 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1279 requirementsmod.CHANGELOGV2_REQUIREMENT,
1280 requirementsmod.CHANGELOGV2_REQUIREMENT,
1280 requirementsmod.COPIESSDC_REQUIREMENT,
1281 requirementsmod.COPIESSDC_REQUIREMENT,
1281 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1282 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1282 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1283 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1283 requirementsmod.DOTENCODE_REQUIREMENT,
1284 requirementsmod.DOTENCODE_REQUIREMENT,
1284 requirementsmod.FNCACHE_REQUIREMENT,
1285 requirementsmod.FNCACHE_REQUIREMENT,
1285 requirementsmod.GENERALDELTA_REQUIREMENT,
1286 requirementsmod.GENERALDELTA_REQUIREMENT,
1286 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1287 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1287 requirementsmod.NODEMAP_REQUIREMENT,
1288 requirementsmod.NODEMAP_REQUIREMENT,
1288 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1289 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1289 requirementsmod.REVLOGV1_REQUIREMENT,
1290 requirementsmod.REVLOGV1_REQUIREMENT,
1290 requirementsmod.REVLOGV2_REQUIREMENT,
1291 requirementsmod.REVLOGV2_REQUIREMENT,
1291 requirementsmod.SHARED_REQUIREMENT,
1292 requirementsmod.SHARED_REQUIREMENT,
1292 requirementsmod.SHARESAFE_REQUIREMENT,
1293 requirementsmod.SHARESAFE_REQUIREMENT,
1293 requirementsmod.SPARSE_REQUIREMENT,
1294 requirementsmod.SPARSE_REQUIREMENT,
1294 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1295 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1295 requirementsmod.STORE_REQUIREMENT,
1296 requirementsmod.STORE_REQUIREMENT,
1296 requirementsmod.TREEMANIFEST_REQUIREMENT,
1297 requirementsmod.TREEMANIFEST_REQUIREMENT,
1297 }
1298 }
1298
1299
1299 # list of prefixes for files which can be written without 'wlock'
1300 # list of prefixes for files which can be written without 'wlock'
1300 # Extensions should extend this list when needed
1301 # Extensions should extend this list when needed
1301 _wlockfreeprefix = {
1302 _wlockfreeprefix = {
1302 # We might consider requiring 'wlock' for the next
1303 # We might consider requiring 'wlock' for the next
1303 # two, but pretty much all the existing code assumes
1304 # two, but pretty much all the existing code assumes
1304 # wlock is not needed so we keep them excluded for
1305 # wlock is not needed so we keep them excluded for
1305 # now.
1306 # now.
1306 b'hgrc',
1307 b'hgrc',
1307 b'requires',
1308 b'requires',
1308 # XXX cache is a complicated business; someone
1309 # XXX cache is a complicated business; someone
1309 # should investigate this in depth at some point
1310 # should investigate this in depth at some point
1310 b'cache/',
1311 b'cache/',
1311 # XXX shouldn't be dirstate covered by the wlock?
1312 # XXX shouldn't be dirstate covered by the wlock?
1312 b'dirstate',
1313 b'dirstate',
1313 # XXX bisect was still a bit too messy at the time
1314 # XXX bisect was still a bit too messy at the time
1314 # this changeset was introduced. Someone should fix
1315 # this changeset was introduced. Someone should fix
1315 # the remaining bit and drop this line
1316 # the remaining bit and drop this line
1316 b'bisect.state',
1317 b'bisect.state',
1317 }
1318 }
1318
1319
1319 def __init__(
1320 def __init__(
1320 self,
1321 self,
1321 baseui,
1322 baseui,
1322 ui,
1323 ui,
1323 origroot,
1324 origroot,
1324 wdirvfs,
1325 wdirvfs,
1325 hgvfs,
1326 hgvfs,
1326 requirements,
1327 requirements,
1327 supportedrequirements,
1328 supportedrequirements,
1328 sharedpath,
1329 sharedpath,
1329 store,
1330 store,
1330 cachevfs,
1331 cachevfs,
1331 wcachevfs,
1332 wcachevfs,
1332 features,
1333 features,
1333 intents=None,
1334 intents=None,
1334 ):
1335 ):
1335 """Create a new local repository instance.
1336 """Create a new local repository instance.
1336
1337
1337 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1338 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1338 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1339 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1339 object.
1340 object.
1340
1341
1341 Arguments:
1342 Arguments:
1342
1343
1343 baseui
1344 baseui
1344 ``ui.ui`` instance that ``ui`` argument was based off of.
1345 ``ui.ui`` instance that ``ui`` argument was based off of.
1345
1346
1346 ui
1347 ui
1347 ``ui.ui`` instance for use by the repository.
1348 ``ui.ui`` instance for use by the repository.
1348
1349
1349 origroot
1350 origroot
1350 ``bytes`` path to working directory root of this repository.
1351 ``bytes`` path to working directory root of this repository.
1351
1352
1352 wdirvfs
1353 wdirvfs
1353 ``vfs.vfs`` rooted at the working directory.
1354 ``vfs.vfs`` rooted at the working directory.
1354
1355
1355 hgvfs
1356 hgvfs
1356 ``vfs.vfs`` rooted at .hg/
1357 ``vfs.vfs`` rooted at .hg/
1357
1358
1358 requirements
1359 requirements
1359 ``set`` of bytestrings representing repository opening requirements.
1360 ``set`` of bytestrings representing repository opening requirements.
1360
1361
1361 supportedrequirements
1362 supportedrequirements
1362 ``set`` of bytestrings representing repository requirements that we
1363 ``set`` of bytestrings representing repository requirements that we
1363 know how to open. May be a superset of ``requirements``.
1364 know how to open. May be a superset of ``requirements``.
1364
1365
1365 sharedpath
1366 sharedpath
1366 ``bytes`` defining the path to the storage base directory. Points to a
1367 ``bytes`` defining the path to the storage base directory. Points to a
1367 ``.hg/`` directory somewhere.
1368 ``.hg/`` directory somewhere.
1368
1369
1369 store
1370 store
1370 ``store.basicstore`` (or derived) instance providing access to
1371 ``store.basicstore`` (or derived) instance providing access to
1371 versioned storage.
1372 versioned storage.
1372
1373
1373 cachevfs
1374 cachevfs
1374 ``vfs.vfs`` used for cache files.
1375 ``vfs.vfs`` used for cache files.
1375
1376
1376 wcachevfs
1377 wcachevfs
1377 ``vfs.vfs`` used for cache files related to the working copy.
1378 ``vfs.vfs`` used for cache files related to the working copy.
1378
1379
1379 features
1380 features
1380 ``set`` of bytestrings defining features/capabilities of this
1381 ``set`` of bytestrings defining features/capabilities of this
1381 instance.
1382 instance.
1382
1383
1383 intents
1384 intents
1384 ``set`` of system strings indicating what this repo will be used
1385 ``set`` of system strings indicating what this repo will be used
1385 for.
1386 for.
1386 """
1387 """
1387 self.baseui = baseui
1388 self.baseui = baseui
1388 self.ui = ui
1389 self.ui = ui
1389 self.origroot = origroot
1390 self.origroot = origroot
1390 # vfs rooted at working directory.
1391 # vfs rooted at working directory.
1391 self.wvfs = wdirvfs
1392 self.wvfs = wdirvfs
1392 self.root = wdirvfs.base
1393 self.root = wdirvfs.base
1393 # vfs rooted at .hg/. Used to access most non-store paths.
1394 # vfs rooted at .hg/. Used to access most non-store paths.
1394 self.vfs = hgvfs
1395 self.vfs = hgvfs
1395 self.path = hgvfs.base
1396 self.path = hgvfs.base
1396 self.requirements = requirements
1397 self.requirements = requirements
1397 self.nodeconstants = sha1nodeconstants
1398 self.nodeconstants = sha1nodeconstants
1398 self.nullid = self.nodeconstants.nullid
1399 self.nullid = self.nodeconstants.nullid
1399 self.supported = supportedrequirements
1400 self.supported = supportedrequirements
1400 self.sharedpath = sharedpath
1401 self.sharedpath = sharedpath
1401 self.store = store
1402 self.store = store
1402 self.cachevfs = cachevfs
1403 self.cachevfs = cachevfs
1403 self.wcachevfs = wcachevfs
1404 self.wcachevfs = wcachevfs
1404 self.features = features
1405 self.features = features
1405
1406
1406 self.filtername = None
1407 self.filtername = None
1407
1408
1408 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1409 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1409 b'devel', b'check-locks'
1410 b'devel', b'check-locks'
1410 ):
1411 ):
1411 self.vfs.audit = self._getvfsward(self.vfs.audit)
1412 self.vfs.audit = self._getvfsward(self.vfs.audit)
1412 # A list of callbacks to shape the phase if no data were found.
1413 # A list of callbacks to shape the phase if no data were found.
1413 # Callbacks are in the form: func(repo, roots) --> processed root.
1414 # Callbacks are in the form: func(repo, roots) --> processed root.
1414 # This list is to be filled by extensions during repo setup
1415 # This list is to be filled by extensions during repo setup
1415 self._phasedefaults = []
1416 self._phasedefaults = []
1416
1417
1417 color.setup(self.ui)
1418 color.setup(self.ui)
1418
1419
1419 self.spath = self.store.path
1420 self.spath = self.store.path
1420 self.svfs = self.store.vfs
1421 self.svfs = self.store.vfs
1421 self.sjoin = self.store.join
1422 self.sjoin = self.store.join
1422 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1423 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1423 b'devel', b'check-locks'
1424 b'devel', b'check-locks'
1424 ):
1425 ):
1425 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1426 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1426 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1427 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1427 else: # standard vfs
1428 else: # standard vfs
1428 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1429 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1429
1430
1430 self._dirstatevalidatewarned = False
1431 self._dirstatevalidatewarned = False
1431
1432
1432 self._branchcaches = branchmap.BranchMapCache()
1433 self._branchcaches = branchmap.BranchMapCache()
1433 self._revbranchcache = None
1434 self._revbranchcache = None
1434 self._filterpats = {}
1435 self._filterpats = {}
1435 self._datafilters = {}
1436 self._datafilters = {}
1436 self._transref = self._lockref = self._wlockref = None
1437 self._transref = self._lockref = self._wlockref = None
1437
1438
1438 # A cache for various files under .hg/ that tracks file changes,
1439 # A cache for various files under .hg/ that tracks file changes,
1439 # (used by the filecache decorator)
1440 # (used by the filecache decorator)
1440 #
1441 #
1441 # Maps a property name to its util.filecacheentry
1442 # Maps a property name to its util.filecacheentry
1442 self._filecache = {}
1443 self._filecache = {}
1443
1444
1444 # hold sets of revisions to be filtered
1445 # hold sets of revisions to be filtered
1445 # should be cleared when something might have changed the filter value:
1446 # should be cleared when something might have changed the filter value:
1446 # - new changesets,
1447 # - new changesets,
1447 # - phase change,
1448 # - phase change,
1448 # - new obsolescence marker,
1449 # - new obsolescence marker,
1449 # - working directory parent change,
1450 # - working directory parent change,
1450 # - bookmark changes
1451 # - bookmark changes
1451 self.filteredrevcache = {}
1452 self.filteredrevcache = {}
1452
1453
1453 # post-dirstate-status hooks
1454 # post-dirstate-status hooks
1454 self._postdsstatus = []
1455 self._postdsstatus = []
1455
1456
1456 # generic mapping between names and nodes
1457 # generic mapping between names and nodes
1457 self.names = namespaces.namespaces()
1458 self.names = namespaces.namespaces()
1458
1459
1459 # Key to signature value.
1460 # Key to signature value.
1460 self._sparsesignaturecache = {}
1461 self._sparsesignaturecache = {}
1461 # Signature to cached matcher instance.
1462 # Signature to cached matcher instance.
1462 self._sparsematchercache = {}
1463 self._sparsematchercache = {}
1463
1464
1464 self._extrafilterid = repoview.extrafilter(ui)
1465 self._extrafilterid = repoview.extrafilter(ui)
1465
1466
1466 self.filecopiesmode = None
1467 self.filecopiesmode = None
1467 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1468 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1468 self.filecopiesmode = b'changeset-sidedata'
1469 self.filecopiesmode = b'changeset-sidedata'
1469
1470
1470 self._wanted_sidedata = set()
1471 self._wanted_sidedata = set()
1471 self._sidedata_computers = {}
1472 self._sidedata_computers = {}
1472 sidedatamod.set_sidedata_spec_for_repo(self)
1473 sidedatamod.set_sidedata_spec_for_repo(self)
1473
1474
1474 def _getvfsward(self, origfunc):
1475 def _getvfsward(self, origfunc):
1475 """build a ward for self.vfs"""
1476 """build a ward for self.vfs"""
1476 rref = weakref.ref(self)
1477 rref = weakref.ref(self)
1477
1478
1478 def checkvfs(path, mode=None):
1479 def checkvfs(path, mode=None):
1479 ret = origfunc(path, mode=mode)
1480 ret = origfunc(path, mode=mode)
1480 repo = rref()
1481 repo = rref()
1481 if (
1482 if (
1482 repo is None
1483 repo is None
1483 or not util.safehasattr(repo, b'_wlockref')
1484 or not util.safehasattr(repo, b'_wlockref')
1484 or not util.safehasattr(repo, b'_lockref')
1485 or not util.safehasattr(repo, b'_lockref')
1485 ):
1486 ):
1486 return
1487 return
1487 if mode in (None, b'r', b'rb'):
1488 if mode in (None, b'r', b'rb'):
1488 return
1489 return
1489 if path.startswith(repo.path):
1490 if path.startswith(repo.path):
1490 # truncate name relative to the repository (.hg)
1491 # truncate name relative to the repository (.hg)
1491 path = path[len(repo.path) + 1 :]
1492 path = path[len(repo.path) + 1 :]
1492 if path.startswith(b'cache/'):
1493 if path.startswith(b'cache/'):
1493 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1494 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1494 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1495 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1495 # path prefixes covered by 'lock'
1496 # path prefixes covered by 'lock'
1496 vfs_path_prefixes = (
1497 vfs_path_prefixes = (
1497 b'journal.',
1498 b'journal.',
1498 b'undo.',
1499 b'undo.',
1499 b'strip-backup/',
1500 b'strip-backup/',
1500 b'cache/',
1501 b'cache/',
1501 )
1502 )
1502 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1503 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1503 if repo._currentlock(repo._lockref) is None:
1504 if repo._currentlock(repo._lockref) is None:
1504 repo.ui.develwarn(
1505 repo.ui.develwarn(
1505 b'write with no lock: "%s"' % path,
1506 b'write with no lock: "%s"' % path,
1506 stacklevel=3,
1507 stacklevel=3,
1507 config=b'check-locks',
1508 config=b'check-locks',
1508 )
1509 )
1509 elif repo._currentlock(repo._wlockref) is None:
1510 elif repo._currentlock(repo._wlockref) is None:
1510 # rest of vfs files are covered by 'wlock'
1511 # rest of vfs files are covered by 'wlock'
1511 #
1512 #
1512 # exclude special files
1513 # exclude special files
1513 for prefix in self._wlockfreeprefix:
1514 for prefix in self._wlockfreeprefix:
1514 if path.startswith(prefix):
1515 if path.startswith(prefix):
1515 return
1516 return
1516 repo.ui.develwarn(
1517 repo.ui.develwarn(
1517 b'write with no wlock: "%s"' % path,
1518 b'write with no wlock: "%s"' % path,
1518 stacklevel=3,
1519 stacklevel=3,
1519 config=b'check-locks',
1520 config=b'check-locks',
1520 )
1521 )
1521 return ret
1522 return ret
1522
1523
1523 return checkvfs
1524 return checkvfs
1524
1525
1525 def _getsvfsward(self, origfunc):
1526 def _getsvfsward(self, origfunc):
1526 """build a ward for self.svfs"""
1527 """build a ward for self.svfs"""
1527 rref = weakref.ref(self)
1528 rref = weakref.ref(self)
1528
1529
1529 def checksvfs(path, mode=None):
1530 def checksvfs(path, mode=None):
1530 ret = origfunc(path, mode=mode)
1531 ret = origfunc(path, mode=mode)
1531 repo = rref()
1532 repo = rref()
1532 if repo is None or not util.safehasattr(repo, b'_lockref'):
1533 if repo is None or not util.safehasattr(repo, b'_lockref'):
1533 return
1534 return
1534 if mode in (None, b'r', b'rb'):
1535 if mode in (None, b'r', b'rb'):
1535 return
1536 return
1536 if path.startswith(repo.sharedpath):
1537 if path.startswith(repo.sharedpath):
1537 # truncate name relative to the repository (.hg)
1538 # truncate name relative to the repository (.hg)
1538 path = path[len(repo.sharedpath) + 1 :]
1539 path = path[len(repo.sharedpath) + 1 :]
1539 if repo._currentlock(repo._lockref) is None:
1540 if repo._currentlock(repo._lockref) is None:
1540 repo.ui.develwarn(
1541 repo.ui.develwarn(
1541 b'write with no lock: "%s"' % path, stacklevel=4
1542 b'write with no lock: "%s"' % path, stacklevel=4
1542 )
1543 )
1543 return ret
1544 return ret
1544
1545
1545 return checksvfs
1546 return checksvfs
1546
1547
1547 def close(self):
1548 def close(self):
1548 self._writecaches()
1549 self._writecaches()
1549
1550
1550 def _writecaches(self):
1551 def _writecaches(self):
1551 if self._revbranchcache:
1552 if self._revbranchcache:
1552 self._revbranchcache.write()
1553 self._revbranchcache.write()
1553
1554
1554 def _restrictcapabilities(self, caps):
1555 def _restrictcapabilities(self, caps):
1555 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1556 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1556 caps = set(caps)
1557 caps = set(caps)
1557 capsblob = bundle2.encodecaps(
1558 capsblob = bundle2.encodecaps(
1558 bundle2.getrepocaps(self, role=b'client')
1559 bundle2.getrepocaps(self, role=b'client')
1559 )
1560 )
1560 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1561 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1561 if self.ui.configbool(b'experimental', b'narrow'):
1562 if self.ui.configbool(b'experimental', b'narrow'):
1562 caps.add(wireprototypes.NARROWCAP)
1563 caps.add(wireprototypes.NARROWCAP)
1563 return caps
1564 return caps
1564
1565
1565 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1566 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1566 # self -> auditor -> self._checknested -> self
1567 # self -> auditor -> self._checknested -> self
1567
1568
1568 @property
1569 @property
1569 def auditor(self):
1570 def auditor(self):
1570 # This is only used by context.workingctx.match in order to
1571 # This is only used by context.workingctx.match in order to
1571 # detect files in subrepos.
1572 # detect files in subrepos.
1572 return pathutil.pathauditor(self.root, callback=self._checknested)
1573 return pathutil.pathauditor(self.root, callback=self._checknested)
1573
1574
1574 @property
1575 @property
1575 def nofsauditor(self):
1576 def nofsauditor(self):
1576 # This is only used by context.basectx.match in order to detect
1577 # This is only used by context.basectx.match in order to detect
1577 # files in subrepos.
1578 # files in subrepos.
1578 return pathutil.pathauditor(
1579 return pathutil.pathauditor(
1579 self.root, callback=self._checknested, realfs=False, cached=True
1580 self.root, callback=self._checknested, realfs=False, cached=True
1580 )
1581 )
1581
1582
1582 def _checknested(self, path):
1583 def _checknested(self, path):
1583 """Determine if path is a legal nested repository."""
1584 """Determine if path is a legal nested repository."""
1584 if not path.startswith(self.root):
1585 if not path.startswith(self.root):
1585 return False
1586 return False
1586 subpath = path[len(self.root) + 1 :]
1587 subpath = path[len(self.root) + 1 :]
1587 normsubpath = util.pconvert(subpath)
1588 normsubpath = util.pconvert(subpath)
1588
1589
1589 # XXX: Checking against the current working copy is wrong in
1590 # XXX: Checking against the current working copy is wrong in
1590 # the sense that it can reject things like
1591 # the sense that it can reject things like
1591 #
1592 #
1592 # $ hg cat -r 10 sub/x.txt
1593 # $ hg cat -r 10 sub/x.txt
1593 #
1594 #
1594 # if sub/ is no longer a subrepository in the working copy
1595 # if sub/ is no longer a subrepository in the working copy
1595 # parent revision.
1596 # parent revision.
1596 #
1597 #
1597 # However, it can of course also allow things that would have
1598 # However, it can of course also allow things that would have
1598 # been rejected before, such as the above cat command if sub/
1599 # been rejected before, such as the above cat command if sub/
1599 # is a subrepository now, but was a normal directory before.
1600 # is a subrepository now, but was a normal directory before.
1600 # The old path auditor would have rejected by mistake since it
1601 # The old path auditor would have rejected by mistake since it
1601 # panics when it sees sub/.hg/.
1602 # panics when it sees sub/.hg/.
1602 #
1603 #
1603 # All in all, checking against the working copy seems sensible
1604 # All in all, checking against the working copy seems sensible
1604 # since we want to prevent access to nested repositories on
1605 # since we want to prevent access to nested repositories on
1605 # the filesystem *now*.
1606 # the filesystem *now*.
1606 ctx = self[None]
1607 ctx = self[None]
1607 parts = util.splitpath(subpath)
1608 parts = util.splitpath(subpath)
1608 while parts:
1609 while parts:
1609 prefix = b'/'.join(parts)
1610 prefix = b'/'.join(parts)
1610 if prefix in ctx.substate:
1611 if prefix in ctx.substate:
1611 if prefix == normsubpath:
1612 if prefix == normsubpath:
1612 return True
1613 return True
1613 else:
1614 else:
1614 sub = ctx.sub(prefix)
1615 sub = ctx.sub(prefix)
1615 return sub.checknested(subpath[len(prefix) + 1 :])
1616 return sub.checknested(subpath[len(prefix) + 1 :])
1616 else:
1617 else:
1617 parts.pop()
1618 parts.pop()
1618 return False
1619 return False
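# Example walk: for subpath b'sub/dir', `prefix` is tried as b'sub/dir'
# first, then b'sub'; if b'sub' is a subrepo in ctx.substate, the check is
# delegated to that subrepo as sub.checknested(b'dir').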
1619
1620
1620 def peer(self):
1621 def peer(self):
1621 return localpeer(self) # not cached to avoid reference cycle
1622 return localpeer(self) # not cached to avoid reference cycle
1622
1623
1623 def unfiltered(self):
1624 def unfiltered(self):
1624 """Return unfiltered version of the repository
1625 """Return unfiltered version of the repository
1625
1626
1626 Intended to be overwritten by filtered repo."""
1627 Intended to be overwritten by filtered repo."""
1627 return self
1628 return self
1628
1629
1629 def filtered(self, name, visibilityexceptions=None):
1630 def filtered(self, name, visibilityexceptions=None):
1630 """Return a filtered version of a repository
1631 """Return a filtered version of a repository
1631
1632
1632 The `name` parameter is the identifier of the requested view. This
1633 The `name` parameter is the identifier of the requested view. This
1633 will return a repoview object set "exactly" to the specified view.
1634 will return a repoview object set "exactly" to the specified view.
1634
1635
1635 This function does not apply recursive filtering to a repository. For
1636 This function does not apply recursive filtering to a repository. For
1636 example, calling `repo.filtered("served")` will return a repoview using
1637 example, calling `repo.filtered("served")` will return a repoview using
1637 the "served" view, regardless of the initial view used by `repo`.
1638 the "served" view, regardless of the initial view used by `repo`.
1638
1639
1639 In other words, there is always only one level of `repoview` "filtering".
1640 In other words, there is always only one level of `repoview` "filtering".
1640 """
1641 """
1641 if self._extrafilterid is not None and b'%' not in name:
1642 if self._extrafilterid is not None and b'%' not in name:
1642 name = name + b'%' + self._extrafilterid
1643 name = name + b'%' + self._extrafilterid
1643
1644
1644 cls = repoview.newtype(self.unfiltered().__class__)
1645 cls = repoview.newtype(self.unfiltered().__class__)
1645 return cls(self, name, visibilityexceptions)
1646 return cls(self, name, visibilityexceptions)
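# Usage sketch: repo.filtered(b'served') yields the view typically exposed
# to remote clients; calling .filtered() again on the result still filters
# the *unfiltered* repository, so views never stack.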
1646
1647
1647 @mixedrepostorecache(
1648 @mixedrepostorecache(
1648 (b'bookmarks', b'plain'),
1649 (b'bookmarks', b'plain'),
1649 (b'bookmarks.current', b'plain'),
1650 (b'bookmarks.current', b'plain'),
1650 (b'bookmarks', b''),
1651 (b'bookmarks', b''),
1651 (b'00changelog.i', b''),
1652 (b'00changelog.i', b''),
1652 )
1653 )
1653 def _bookmarks(self):
1654 def _bookmarks(self):
1654 # Since the multiple files involved in the transaction cannot be
1655 # Since the multiple files involved in the transaction cannot be
1655 # written atomically (with the current repository format), there is a race
1656 # written atomically (with the current repository format), there is a race
1656 # condition here.
1657 # condition here.
1657 #
1658 #
1658 # 1) changelog content A is read
1659 # 1) changelog content A is read
1659 # 2) outside transaction update changelog to content B
1660 # 2) outside transaction update changelog to content B
1660 # 3) outside transaction update bookmark file referring to content B
1661 # 3) outside transaction update bookmark file referring to content B
1661 # 4) bookmarks file content is read and filtered against changelog-A
1662 # 4) bookmarks file content is read and filtered against changelog-A
1662 #
1663 #
1663 # When this happens, bookmarks against nodes missing from A are dropped.
1664 # When this happens, bookmarks against nodes missing from A are dropped.
1664 #
1665 #
1665 # Having this happen during a read is not great, but it becomes worse
1666 # Having this happen during a read is not great, but it becomes worse
1666 # when it happens during a write, because the bookmarks to the "unknown"
1667 # when it happens during a write, because the bookmarks to the "unknown"
1667 # nodes will be dropped for good. However, writes happen within locks.
1668 # nodes will be dropped for good. However, writes happen within locks.
1668 # This locking makes it possible to have a race-free consistent read.
1669 # This locking makes it possible to have a race-free consistent read.
1669 # For this purpose, data read from disk before locking are
1670 # For this purpose, data read from disk before locking are
1670 # "invalidated" right after the locks are taken. These invalidations are
1671 # "invalidated" right after the locks are taken. These invalidations are
1671 # "light"; the `filecache` mechanism keeps the data in memory and will
1672 # "light"; the `filecache` mechanism keeps the data in memory and will
1672 # reuse it if the underlying files did not change. Not parsing the
1673 # reuse it if the underlying files did not change. Not parsing the
1673 # same data multiple times helps performance.
1674 # same data multiple times helps performance.
1674 #
1675 #
1675 # Unfortunately, in the case described above, the files tracked by the
1676 # Unfortunately, in the case described above, the files tracked by the
1676 # bookmarks file cache might not have changed, but the in-memory
1677 # bookmarks file cache might not have changed, but the in-memory
1677 # content is still "wrong" because we used an older changelog content
1678 # content is still "wrong" because we used an older changelog content
1678 # to process the on-disk data. So after locking, the changelog would be
1679 # to process the on-disk data. So after locking, the changelog would be
1679 # refreshed but `_bookmarks` would be preserved.
1680 # refreshed but `_bookmarks` would be preserved.
1680 # Adding `00changelog.i` to the list of tracked files is not
1681 # Adding `00changelog.i` to the list of tracked files is not
1681 # enough, because at the time we build the content for `_bookmarks` in
1682 # enough, because at the time we build the content for `_bookmarks` in
1682 # (4), the changelog file has already diverged from the content used
1683 # (4), the changelog file has already diverged from the content used
1683 # for loading `changelog` in (1)
1684 # for loading `changelog` in (1)
1684 #
1685 #
1685 # To prevent the issue, we force the changelog to be explicitly
1686 # To prevent the issue, we force the changelog to be explicitly
1686 # reloaded while computing `_bookmarks`. The data race can still happen
1687 # reloaded while computing `_bookmarks`. The data race can still happen
1687 # without the lock (with a narrower window), but it would no longer go
1688 # without the lock (with a narrower window), but it would no longer go
1688 # undetected during the lock time refresh.
1689 # undetected during the lock time refresh.
1689 #
1690 #
1690 # The new schedule is as follows:
1691 # The new schedule is as follows:
1691 #
1692 #
1692 # 1) filecache logic detect that `_bookmarks` needs to be computed
1693 # 1) filecache logic detect that `_bookmarks` needs to be computed
1693 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1694 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1694 # 3) We force `changelog` filecache to be tested
1695 # 3) We force `changelog` filecache to be tested
1695 # 4) cachestat for `changelog` are captured (for changelog)
1696 # 4) cachestat for `changelog` are captured (for changelog)
1696 # 5) `_bookmarks` is computed and cached
1697 # 5) `_bookmarks` is computed and cached
1697 #
1698 #
1698 # The step in (3) ensures we have a changelog at least as recent as the
1699 # The step in (3) ensures we have a changelog at least as recent as the
1699 # cache stat computed in (1). As a result at locking time:
1700 # cache stat computed in (1). As a result at locking time:
1700 # * if the changelog did not change since (1) -> we can reuse the data
1701 # * if the changelog did not change since (1) -> we can reuse the data
1701 # * otherwise -> the bookmarks get refreshed.
1702 # * otherwise -> the bookmarks get refreshed.
1702 self._refreshchangelog()
1703 self._refreshchangelog()
1703 return bookmarks.bmstore(self)
1704 return bookmarks.bmstore(self)
1704
1705
1705 def _refreshchangelog(self):
1706 def _refreshchangelog(self):
1706 """make sure the in memory changelog match the on-disk one"""
1707 """make sure the in memory changelog match the on-disk one"""
1707 if 'changelog' in vars(self) and self.currenttransaction() is None:
1708 if 'changelog' in vars(self) and self.currenttransaction() is None:
1708 del self.changelog
1709 del self.changelog
1709
1710
1710 @property
1711 @property
1711 def _activebookmark(self):
1712 def _activebookmark(self):
1712 return self._bookmarks.active
1713 return self._bookmarks.active
1713
1714
1714 # _phasesets depend on the changelog. What we need is to call
1715 # _phasesets depend on the changelog. What we need is to call
1715 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1716 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1716 # can't be easily expressed in the filecache mechanism.
1717 # can't be easily expressed in the filecache mechanism.
1717 @storecache(b'phaseroots', b'00changelog.i')
1718 @storecache(b'phaseroots', b'00changelog.i')
1718 def _phasecache(self):
1719 def _phasecache(self):
1719 return phases.phasecache(self, self._phasedefaults)
1720 return phases.phasecache(self, self._phasedefaults)
1720
1721
1721 @storecache(b'obsstore')
1722 @storecache(b'obsstore')
1722 def obsstore(self):
1723 def obsstore(self):
1723 return obsolete.makestore(self.ui, self)
1724 return obsolete.makestore(self.ui, self)
1724
1725
1725 @changelogcache()
1726 @changelogcache()
1726 def changelog(repo):
1727 def changelog(repo):
1727 # load dirstate before changelog to avoid a race; see issue6303
1728 # load dirstate before changelog to avoid a race; see issue6303
1728 repo.dirstate.prefetch_parents()
1729 repo.dirstate.prefetch_parents()
1729 return repo.store.changelog(
1730 return repo.store.changelog(
1730 txnutil.mayhavepending(repo.root),
1731 txnutil.mayhavepending(repo.root),
1731 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1732 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1732 )
1733 )
1733
1734
1734 @manifestlogcache()
1735 @manifestlogcache()
1735 def manifestlog(self):
1736 def manifestlog(self):
1736 return self.store.manifestlog(self, self._storenarrowmatch)
1737 return self.store.manifestlog(self, self._storenarrowmatch)
1737
1738
1738 @repofilecache(b'dirstate')
1739 @repofilecache(b'dirstate')
1739 def dirstate(self):
1740 def dirstate(self):
1740 return self._makedirstate()
1741 return self._makedirstate()
1741
1742
1742 def _makedirstate(self):
1743 def _makedirstate(self):
1743 """Extension point for wrapping the dirstate per-repo."""
1744 """Extension point for wrapping the dirstate per-repo."""
1744 sparsematchfn = lambda: sparse.matcher(self)
1745 sparsematchfn = lambda: sparse.matcher(self)
1745 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1746 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1746 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1747 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1747 use_dirstate_v2 = v2_req in self.requirements
1748 use_dirstate_v2 = v2_req in self.requirements
1748 use_tracked_hint = th in self.requirements
1749 use_tracked_hint = th in self.requirements
1749
1750
1750 return dirstate.dirstate(
1751 return dirstate.dirstate(
1751 self.vfs,
1752 self.vfs,
1752 self.ui,
1753 self.ui,
1753 self.root,
1754 self.root,
1754 self._dirstatevalidate,
1755 self._dirstatevalidate,
1755 sparsematchfn,
1756 sparsematchfn,
1756 self.nodeconstants,
1757 self.nodeconstants,
1757 use_dirstate_v2,
1758 use_dirstate_v2,
1758 use_tracked_hint=use_tracked_hint,
1759 use_tracked_hint=use_tracked_hint,
1759 )
1760 )
1760
1761
1761 def _dirstatevalidate(self, node):
1762 def _dirstatevalidate(self, node):
1762 try:
1763 try:
1763 self.changelog.rev(node)
1764 self.changelog.rev(node)
1764 return node
1765 return node
1765 except error.LookupError:
1766 except error.LookupError:
1766 if not self._dirstatevalidatewarned:
1767 if not self._dirstatevalidatewarned:
1767 self._dirstatevalidatewarned = True
1768 self._dirstatevalidatewarned = True
1768 self.ui.warn(
1769 self.ui.warn(
1769 _(b"warning: ignoring unknown working parent %s!\n")
1770 _(b"warning: ignoring unknown working parent %s!\n")
1770 % short(node)
1771 % short(node)
1771 )
1772 )
1772 return self.nullid
1773 return self.nullid
1773
1774
1774 @storecache(narrowspec.FILENAME)
1775 @storecache(narrowspec.FILENAME)
1775 def narrowpats(self):
1776 def narrowpats(self):
1776 """matcher patterns for this repository's narrowspec
1777 """matcher patterns for this repository's narrowspec
1777
1778
1778 A tuple of (includes, excludes).
1779 A tuple of (includes, excludes).
1779 """
1780 """
1780 return narrowspec.load(self)
1781 return narrowspec.load(self)
1781
1782
1782 @storecache(narrowspec.FILENAME)
1783 @storecache(narrowspec.FILENAME)
1783 def _storenarrowmatch(self):
1784 def _storenarrowmatch(self):
1784 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1785 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1785 return matchmod.always()
1786 return matchmod.always()
1786 include, exclude = self.narrowpats
1787 include, exclude = self.narrowpats
1787 return narrowspec.match(self.root, include=include, exclude=exclude)
1788 return narrowspec.match(self.root, include=include, exclude=exclude)
1788
1789
1789 @storecache(narrowspec.FILENAME)
1790 @storecache(narrowspec.FILENAME)
1790 def _narrowmatch(self):
1791 def _narrowmatch(self):
1791 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1792 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1792 return matchmod.always()
1793 return matchmod.always()
1793 narrowspec.checkworkingcopynarrowspec(self)
1794 narrowspec.checkworkingcopynarrowspec(self)
1794 include, exclude = self.narrowpats
1795 include, exclude = self.narrowpats
1795 return narrowspec.match(self.root, include=include, exclude=exclude)
1796 return narrowspec.match(self.root, include=include, exclude=exclude)
1796
1797
    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                #   skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains the symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

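    # Illustrative note (editor sketch, not in the original source): for a
    # repository whose working copy parent is rev 5 with node N, the
    # fast-path mapping contains entries along the lines of
    #   {b'null': (nullrev, nullid), 5: (5, N), N: (5, N), b'.': (5, N)}
    # so __getitem__ can resolve these identifiers without a changelog
    # lookup.
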
    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

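    # Illustrative usage (editor sketch, not in the original source), showing
    # the %-escaping handled by revsetlang.formatspec:
    #
    #   repo.revs(b'heads(%ld)', [0, 1, 2])                    # %ld: rev list
    #   repo.revs(b'branch(%s) and not public()', b'default')  # %s: bytes
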
    def set(self, expr, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

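    # Illustrative usage (editor sketch, not in the original source):
    #
    #   for ctx in repo.set(b'draft()'):
    #       print(ctx.rev(), ctx.hex())
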
    def anyrevs(self, specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

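    # Illustrative usage (editor sketch; the hook name is hypothetical):
    #
    #   repo.hook(b'myext-precommit', throw=True, txnid=txnid)
    #
    # With throw=True a failing hook raises an abort error instead of merely
    # returning a non-zero status.
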
    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like 'global' or 'local'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)

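    # Illustrative shape of the return value (editor sketch with hypothetical
    # nodes):
    #
    #   tags     = {b'tip': node1, b'v1.0': node2}
    #   tagtypes = {b'v1.0': b'global', b'wip': b'local'}
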
    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

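    # Illustrative hgrc snippet read by publishing() (editor sketch):
    #
    #   [phases]
    #   publish = False
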
    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

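    # Illustrative hgrc configuration consumed by _loadfilter (editor sketch):
    # the [encode] and [decode] sections pair a file pattern with either a
    # shell command or a data filter registered via adddatafilter(), e.g.
    #
    #   [encode]
    #   **.txt = tr -d '\r'
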
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

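    # Note (editor sketch): ``flags`` uses the manifest flag characters:
    # b'l' writes a symlink and b'x' marks the file executable; an empty
    # flags string produces a regular, non-executable file.
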
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new, changed, or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
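        # For instance (illustrative example, not in the original comment),
        # moving tag "v1.0" would record a "-M <old-hex> v1.0" line followed
        # by a "+M <new-hex> v1.0" line in tags.changes.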
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # This should be explicitly invoked here because in-memory
                # changes aren't written out at transaction close if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running.
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            repo = reporef()
            assert repo is not None  # help pytype
            repo.hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

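    # Illustrative caller-side sketch (editor addition, not in the original
    # source): transactions must run under the store lock and be explicitly
    # closed, roughly:
    #
    #   with repo.lock():
    #       tr = repo.transaction(b'my-operation')
    #       try:
    #           ...  # mutate the store
    #           tr.close()
    #       finally:
    #           tr.release()
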
    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

2627 @unfilteredmethod
2628 @unfilteredmethod
2628 def _writejournal(self, desc):
2629 def _writejournal(self, desc):
2629 self.dirstate.savebackup(None, b'journal.dirstate')
2630 self.dirstate.savebackup(None, b'journal.dirstate')
2630 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2631 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2631 narrowspec.savebackup(self, b'journal.narrowspec')
2632 narrowspec.savebackup(self, b'journal.narrowspec')
2632 self.vfs.write(
2633 self.vfs.write(
2633 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2634 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2634 )
2635 )
2635 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2636 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2636 bookmarksvfs = bookmarks.bookmarksvfs(self)
2637 bookmarksvfs = bookmarks.bookmarksvfs(self)
2637 bookmarksvfs.write(
2638 bookmarksvfs.write(
2638 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2639 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2639 )
2640 )
2640 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2641 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2641
2642
2642 def recover(self):
2643 def recover(self):
2643 with self.lock():
2644 with self.lock():
2644 if self.svfs.exists(b"journal"):
2645 if self.svfs.exists(b"journal"):
2645 self.ui.status(_(b"rolling back interrupted transaction\n"))
2646 self.ui.status(_(b"rolling back interrupted transaction\n"))
2646 vfsmap = {
2647 vfsmap = {
2647 b'': self.svfs,
2648 b'': self.svfs,
2648 b'plain': self.vfs,
2649 b'plain': self.vfs,
2649 }
2650 }
2650 transaction.rollback(
2651 transaction.rollback(
2651 self.svfs,
2652 self.svfs,
2652 vfsmap,
2653 vfsmap,
2653 b"journal",
2654 b"journal",
2654 self.ui.warn,
2655 self.ui.warn,
2655 checkambigfiles=_cachedfiles,
2656 checkambigfiles=_cachedfiles,
2656 )
2657 )
2657 self.invalidate()
2658 self.invalidate()
2658 return True
2659 return True
2659 else:
2660 else:
2660 self.ui.warn(_(b"no interrupted transaction available\n"))
2661 self.ui.warn(_(b"no interrupted transaction available\n"))
2661 return False
2662 return False
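
    # Minimal usage sketch, assuming `repo` is a repository object and the
    # caller wants to replay an interrupted transaction (illustrative):
    #
    #     recovered = repo.recover()  # True if a journal was rolled back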

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            assert repo is not None  # help pytype
            repo.updatecaches(tr)

        return updater
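
    # Sketch of how an extension might augment the cache updater; the
    # wrapper below and the use of extensions.wrapfunction() are
    # illustrative assumptions, not code used by this module:
    #
    #     def wrapped(orig, repo, newtransaction):
    #         updater = orig(repo, newtransaction)
    #
    #         def myupdater(tr):
    #             updater(tr)
    #             ...  # warm extension-specific caches here
    #
    #         return myupdater
    #
    #     extensions.wrapfunction(
    #         localrepo.localrepository, '_buildcacheupdater', wrapped
    #     )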

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False, caches=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this case
        the cache warming is done after a clone and some of the slower caches
        might be skipped, namely the `.fnodetags` one. This argument is 5.8
        specific as we plan for a cleaner way to deal with this for 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        unfi = self.unfiltered()

        if full:
            msg = (
                "`full` argument for `repo.updatecaches` is deprecated\n"
                "(use `caches=repository.CACHE_ALL` instead)"
            )
            self.ui.deprecwarn(msg, b"5.9")
            caches = repository.CACHES_ALL
            if full == b"post-clone":
                caches = repository.CACHES_POST_CLONE
            caches = repository.CACHES_ALL
        elif caches is None:
            caches = repository.CACHES_DEFAULT

        if repository.CACHE_BRANCHMAP_SERVED in caches:
            if tr is None or tr.changes[b'origrepolen'] < len(self):
                # accessing the 'served' branchmap should refresh all the others,
                self.ui.debug(b'updating the branch cache\n')
                self.filtered(b'served').branchmap()
                self.filtered(b'served.hidden').branchmap()
                # flush all possibly delayed write.
                self._branchcaches.write_delayed(self)

        if repository.CACHE_CHANGELOG_CACHE in caches:
            self.changelog.update_caches(transaction=tr)

        if repository.CACHE_MANIFESTLOG_CACHE in caches:
            self.manifestlog.update_caches(transaction=tr)

        if repository.CACHE_REV_BRANCH in caches:
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

        if repository.CACHE_FULL_MANIFEST in caches:
            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

        if repository.CACHE_FILE_NODE_TAGS in caches:
            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())

        if repository.CACHE_TAGS_DEFAULT in caches:
            # accessing tags warms the cache
            self.tags()
        if repository.CACHE_TAGS_SERVED in caches:
            self.filtered(b'served').tags()

        if repository.CACHE_BRANCHMAP_ALL in caches:
            # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
            # so we're forcing a write to cause these caches to be warmed up
            # even if they haven't explicitly been requested yet (if they've
            # never been used by hg, they won't ever have been written, even if
            # they're a subset of another kind of cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)
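
    # Usage sketch (illustrative caller code): warming every known cache
    # explicitly, or just the caches relevant to a transaction that closed:
    #
    #     from mercurial.interfaces import repository
    #     repo.updatecaches(caches=repository.CACHES_ALL)
    #     repo.updatecaches(tr)  # scoped to a closed transaction `tr`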

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)
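
    # For example (illustrative), scheduling work for after the final
    # lock release; the callback receives a success boolean:
    #
    #     def notify(success):
    #         if success:
    #             ...  # e.g. fire an external notification
    #
    #     repo._afterlock(notify)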

    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref() if self._wlockref else None
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l
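
    # Lock-ordering sketch: when both locks are needed, take 'wlock'
    # first, then 'lock', exactly as commit() below does (illustrative
    # caller code):
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # mutate working copy and store safely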

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

        for f in match.files():
            f = self.dirstate.normalize(f)
            if f == b'.' or f in matched or f in wctx.substate:
                continue
            if f in status.deleted:
                fail(f, _(b'file not found!'))
            # Is it a directory that exists or used to exist?
            if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                d = f + b'/'
                for mf in matched:
                    if mf.startswith(d):
                        break
                else:
                    fail(f, _(b"no match under directory!"))
            elif f not in self.dirstate:
                fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msg_path = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msg_path
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b"%s --edit' to reuse it\n"
                        )
                        % msg_path
                    )
                raise

            def commithook(unused_success):
                # hack for commands that use a temporary commit (e.g. histedit)
                # temporary commit got stripped before hook release
                if self.changelog.hasnode(ret):
                    self.hook(
                        b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                    )

            self._afterlock(commithook)
            return ret
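
    # Minimal API-level sketch (illustrative; most callers go through
    # cmdutil and the commit command instead):
    #
    #     node = repo.commit(
    #         text=b'example message',
    #         user=b'Example User <user@example.com>',
    #     )
    #     # `node` is None when there was nothing to commit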

    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret
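
    # Round-trip sketch for the 'bookmarks' pushkey namespace; the
    # bookmark name and `newhexnode` are illustrative placeholders:
    #
    #     keys = repo.listkeys(b'bookmarks')
    #     ok = repo.pushkey(
    #         b'bookmarks', b'mybook', keys.get(b'mybook', b''), newhexnode
    #     )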

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

    def register_wanted_sidedata(self, category):
        if repository.REPO_FEATURE_SIDE_DATA not in self.features:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)
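
    # Registration sketch; the category name and the no-op computer below
    # are illustrative assumptions, not an existing sidedata category:
    #
    #     def compute_nothing(repo, store, rev, prev_sidedata):
    #         return {}, (0, 0)
    #
    #     repo.register_wanted_sidedata(b'example')
    #     repo.register_sidedata_computer(
    #         revlogconst.KIND_CHANGELOG, b'example', (), compute_nothing, 0
    #     )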


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError as exc:  # journal file does not yet exist
                if exc.errno != errno.ENOENT:
                    raise

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))
3516
3517
3517
3518
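
Taken together, the two helpers above implement the journal-to-undo handoff at the end of a transaction. A small sketch with illustrative names (the vfs and file names are placeholders):

    # after the transaction commits, the callback renames each journal
    # file to its undo counterpart so the transaction can be rolled back
    after = aftertrans([(vfs, b'journal.dirstate', b'undo.dirstate')])
    after()  # journal.dirstate -> undo.dirstate

    undoname(b'store/journal')  # -> b'store/undo'
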
3518 def instance(ui, path, create, intents=None, createopts=None):
3519 def instance(ui, path, create, intents=None, createopts=None):
3519
3520
3520 # prevent cyclic import localrepo -> upgrade -> localrepo
3521 # prevent cyclic import localrepo -> upgrade -> localrepo
3521 from . import upgrade
3522 from . import upgrade
3522
3523
3523 localpath = urlutil.urllocalpath(path)
3524 localpath = urlutil.urllocalpath(path)
3524 if create:
3525 if create:
3525 createrepository(ui, localpath, createopts=createopts)
3526 createrepository(ui, localpath, createopts=createopts)
3526
3527
3527 def repo_maker():
3528 def repo_maker():
3528 return makelocalrepository(ui, localpath, intents=intents)
3529 return makelocalrepository(ui, localpath, intents=intents)
3529
3530
3530 repo = repo_maker()
3531 repo = repo_maker()
3531 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3532 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3532 return repo
3533 return repo
3533
3534
3534
3535
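
instance() is normally reached through the higher-level mercurial.hg module rather than called directly; a hedged sketch of what a caller typically does:

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    # hg.repository() resolves the path and ends up calling instance(),
    # which may transparently auto-upgrade the repo (see above)
    repo = hg.repository(ui, b'/path/to/repo')
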
3535 def islocal(path):
3536 def islocal(path):
3536 return True
3537 return True
3537
3538
3538
3539
3539 def defaultcreateopts(ui, createopts=None):
3540 def defaultcreateopts(ui, createopts=None):
3540 """Populate the default creation options for a repository.
3541 """Populate the default creation options for a repository.
3541
3542
3542 A dictionary of explicitly requested creation options can be passed
3543 A dictionary of explicitly requested creation options can be passed
3543 in. Missing keys will be populated.
3544 in. Missing keys will be populated.
3544 """
3545 """
3545 createopts = dict(createopts or {})
3546 createopts = dict(createopts or {})
3546
3547
3547 if b'backend' not in createopts:
3548 if b'backend' not in createopts:
3548 # experimental config: storage.new-repo-backend
3549 # experimental config: storage.new-repo-backend
3549 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3550 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3550
3551
3551 return createopts
3552 return createopts
3552
3553
3553
3554
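
A short sketch of the merge behaviour: explicitly requested options win, and only missing keys are filled from configuration.

    # assuming a stock configuration, the backend is filled in from the
    # (experimental) storage.new-repo-backend option
    opts = defaultcreateopts(ui, createopts={b'narrowfiles': True})
    assert opts[b'narrowfiles'] is True     # explicit value kept
    assert opts[b'backend'] == b'revlogv1'  # typical default
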
3554 def clone_requirements(ui, createopts, srcrepo):
3555 def clone_requirements(ui, createopts, srcrepo):
3555 """clone the requirements of a local repo for a local clone
3556 """clone the requirements of a local repo for a local clone
3556
3557
3557 The store requirements are unchanged while the working copy requirements
3558 The store requirements are unchanged while the working copy requirements
3558 depend on the configuration.
3559 depend on the configuration.
3559 """
3560 """
3560 target_requirements = set()
3561 target_requirements = set()
3561 if not srcrepo.requirements:
3562 if not srcrepo.requirements:
3562 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3563 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3563 # with it.
3564 # with it.
3564 return target_requirements
3565 return target_requirements
3565 createopts = defaultcreateopts(ui, createopts=createopts)
3566 createopts = defaultcreateopts(ui, createopts=createopts)
3566 for r in newreporequirements(ui, createopts):
3567 for r in newreporequirements(ui, createopts):
3567 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3568 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3568 target_requirements.add(r)
3569 target_requirements.add(r)
3569
3570
3570 for r in srcrepo.requirements:
3571 for r in srcrepo.requirements:
3571 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3572 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3572 target_requirements.add(r)
3573 target_requirements.add(r)
3573 return target_requirements
3574 return target_requirements
3574
3575
3575
3576
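
In other words, the returned set mixes the source's store requirements with locally computed working-copy requirements; a sketch of the resulting invariant:

    reqs = clone_requirements(ui, None, srcrepo)
    for r in reqs:
        assert (
            r in srcrepo.requirements  # store side, copied from the source
            or r in requirementsmod.WORKING_DIR_REQUIREMENTS  # local side
        )
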
3576 def newreporequirements(ui, createopts):
3577 def newreporequirements(ui, createopts):
3577 """Determine the set of requirements for a new local repository.
3578 """Determine the set of requirements for a new local repository.
3578
3579
3579 Extensions can wrap this function to specify custom requirements for
3580 Extensions can wrap this function to specify custom requirements for
3580 new repositories.
3581 new repositories.
3581 """
3582 """
3582
3583
3583 if b'backend' not in createopts:
3584 if b'backend' not in createopts:
3584 raise error.ProgrammingError(
3585 raise error.ProgrammingError(
3585 b'backend key not present in createopts; '
3586 b'backend key not present in createopts; '
3586 b'was defaultcreateopts() called?'
3587 b'was defaultcreateopts() called?'
3587 )
3588 )
3588
3589
3589 if createopts[b'backend'] != b'revlogv1':
3590 if createopts[b'backend'] != b'revlogv1':
3590 raise error.Abort(
3591 raise error.Abort(
3591 _(
3592 _(
3592 b'unable to determine repository requirements for '
3593 b'unable to determine repository requirements for '
3593 b'storage backend: %s'
3594 b'storage backend: %s'
3594 )
3595 )
3595 % createopts[b'backend']
3596 % createopts[b'backend']
3596 )
3597 )
3597
3598
3598 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3599 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3599 if ui.configbool(b'format', b'usestore'):
3600 if ui.configbool(b'format', b'usestore'):
3600 requirements.add(requirementsmod.STORE_REQUIREMENT)
3601 requirements.add(requirementsmod.STORE_REQUIREMENT)
3601 if ui.configbool(b'format', b'usefncache'):
3602 if ui.configbool(b'format', b'usefncache'):
3602 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3603 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3603 if ui.configbool(b'format', b'dotencode'):
3604 if ui.configbool(b'format', b'dotencode'):
3604 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3605 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3605
3606
3606 compengines = ui.configlist(b'format', b'revlog-compression')
3607 compengines = ui.configlist(b'format', b'revlog-compression')
3607 for compengine in compengines:
3608 for compengine in compengines:
3608 if compengine in util.compengines:
3609 if compengine in util.compengines:
3609 engine = util.compengines[compengine]
3610 engine = util.compengines[compengine]
3610 if engine.available() and engine.revlogheader():
3611 if engine.available() and engine.revlogheader():
3611 break
3612 break
3612 else:
3613 else:
3613 raise error.Abort(
3614 raise error.Abort(
3614 _(
3615 _(
3615 b'compression engines %s defined by '
3616 b'compression engines %s defined by '
3616 b'format.revlog-compression not available'
3617 b'format.revlog-compression not available'
3617 )
3618 )
3618 % b', '.join(b'"%s"' % e for e in compengines),
3619 % b', '.join(b'"%s"' % e for e in compengines),
3619 hint=_(
3620 hint=_(
3620 b'run "hg debuginstall" to list available '
3621 b'run "hg debuginstall" to list available '
3621 b'compression engines'
3622 b'compression engines'
3622 ),
3623 ),
3623 )
3624 )
3624
3625
3625 # zlib is the historical default and doesn't need an explicit requirement.
3626 # zlib is the historical default and doesn't need an explicit requirement.
3626 if compengine == b'zstd':
3627 if compengine == b'zstd':
3627 requirements.add(b'revlog-compression-zstd')
3628 requirements.add(b'revlog-compression-zstd')
3628 elif compengine != b'zlib':
3629 elif compengine != b'zlib':
3629 requirements.add(b'exp-compression-%s' % compengine)
3630 requirements.add(b'exp-compression-%s' % compengine)
3630
3631
3631 if scmutil.gdinitconfig(ui):
3632 if scmutil.gdinitconfig(ui):
3632 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3633 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3633 if ui.configbool(b'format', b'sparse-revlog'):
3634 if ui.configbool(b'format', b'sparse-revlog'):
3634 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3635 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3635
3636
3636 # experimental config: format.use-dirstate-v2
3637 # experimental config: format.use-dirstate-v2
3637 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3638 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3638 if ui.configbool(b'format', b'use-dirstate-v2'):
3639 if ui.configbool(b'format', b'use-dirstate-v2'):
3639 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3640 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3640
3641
3641 # experimental config: format.exp-use-copies-side-data-changeset
3642 # experimental config: format.exp-use-copies-side-data-changeset
3642 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3643 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3643 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3644 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3644 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3645 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3645 if ui.configbool(b'experimental', b'treemanifest'):
3646 if ui.configbool(b'experimental', b'treemanifest'):
3646 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3647 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3647
3648
3648 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3649 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3649 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3650 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3650 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3651 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3651
3652
3652 revlogv2 = ui.config(b'experimental', b'revlogv2')
3653 revlogv2 = ui.config(b'experimental', b'revlogv2')
3653 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3654 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3654 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3655 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3655 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3656 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3656 # experimental config: format.internal-phase
3657 # experimental config: format.internal-phase
3657 if ui.configbool(b'format', b'internal-phase'):
3658 if ui.configbool(b'format', b'internal-phase'):
3658 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3659 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3659
3660
3660 if createopts.get(b'narrowfiles'):
3661 if createopts.get(b'narrowfiles'):
3661 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3662 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3662
3663
3663 if createopts.get(b'lfs'):
3664 if createopts.get(b'lfs'):
3664 requirements.add(b'lfs')
3665 requirements.add(b'lfs')
3665
3666
3666 if ui.configbool(b'format', b'bookmarks-in-store'):
3667 if ui.configbool(b'format', b'bookmarks-in-store'):
3667 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3668 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3668
3669
3669 if ui.configbool(b'format', b'use-persistent-nodemap'):
3670 if ui.configbool(b'format', b'use-persistent-nodemap'):
3670 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3671 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3671
3672
3672 # if share-safe is enabled, let's create the new repository with the new
3673 # if share-safe is enabled, let's create the new repository with the new
3673 # requirement
3674 # requirement
3674 if ui.configbool(b'format', b'use-share-safe'):
3675 if ui.configbool(b'format', b'use-share-safe'):
3675 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3676 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3676
3677
3677 # if we are creating a share-repo¹ we have to handle requirements
3678 # if we are creating a share-repo¹ we have to handle requirements
3678 # differently.
3679 # differently.
3679 #
3680 #
3680 # [1] (i.e. reusing the store from another repository, just having a
3681 # [1] (i.e. reusing the store from another repository, just having a
3681 # working copy)
3682 # working copy)
3682 if b'sharedrepo' in createopts:
3683 if b'sharedrepo' in createopts:
3683 source_requirements = set(createopts[b'sharedrepo'].requirements)
3684 source_requirements = set(createopts[b'sharedrepo'].requirements)
3684
3685
3685 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3686 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3686 # share to an old school repository, we have to copy the
3687 # share to an old school repository, we have to copy the
3687 # requirements and hope for the best.
3688 # requirements and hope for the best.
3688 requirements = source_requirements
3689 requirements = source_requirements
3689 else:
3690 else:
3690 # We have control over the working copy only, so "copy" the non
3691 # We have control over the working copy only, so "copy" the non
3691 # working copy part over, ignoring previous logic.
3692 # working copy part over, ignoring previous logic.
3692 to_drop = set()
3693 to_drop = set()
3693 for req in requirements:
3694 for req in requirements:
3694 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3695 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3695 continue
3696 continue
3696 if req in source_requirements:
3697 if req in source_requirements:
3697 continue
3698 continue
3698 to_drop.add(req)
3699 to_drop.add(req)
3699 requirements -= to_drop
3700 requirements -= to_drop
3700 requirements |= source_requirements
3701 requirements |= source_requirements
3701
3702
3702 if createopts.get(b'sharedrelative'):
3703 if createopts.get(b'sharedrelative'):
3703 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3704 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3704 else:
3705 else:
3705 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3706 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3706
3707
3707 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3708 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3708 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3709 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3709 msg = _(b"ignoring unknown tracked key version: %d\n")
3710 msg = _(b"ignoring unknown tracked key version: %d\n")
3710 hint = _(b"see `hg help config.format.use-dirstate-tracked-hint-version`")
3711 hint = _(b"see `hg help config.format.use-dirstate-tracked-hint-version`")
3711 if version != 1:
3712 if version != 1:
3712 ui.warn(msg % version, hint=hint)
3713 ui.warn(msg % version, hint=hint)
3713 else:
3714 else:
3714 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3715 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3715
3716
3716 return requirements
3717 return requirements
3717
3718
3718
3719
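
As a rough sketch of the outcome (the exact set depends on the installed version and configuration), a default setup currently yields something like:

    reqs = newreporequirements(ui, defaultcreateopts(ui))
    # typically includes, with stock settings:
    #   b'revlogv1', b'store', b'fncache', b'dotencode',
    #   b'generaldelta', b'sparserevlog', ...
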
3719 def checkrequirementscompat(ui, requirements):
3720 def checkrequirementscompat(ui, requirements):
3720 """Checks compatibility of repository requirements enabled and disabled.
3721 """Checks compatibility of repository requirements enabled and disabled.
3721
3722
3722 Returns a set of requirements which need to be dropped because dependent
3723 Returns a set of requirements which need to be dropped because dependent
3723 requirements are not enabled. Also warns users about it."""
3724 requirements are not enabled. Also warns users about it."""
3724
3725
3725 dropped = set()
3726 dropped = set()
3726
3727
3727 if requirementsmod.STORE_REQUIREMENT not in requirements:
3728 if requirementsmod.STORE_REQUIREMENT not in requirements:
3728 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3729 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3729 ui.warn(
3730 ui.warn(
3730 _(
3731 _(
3731 b'ignoring enabled \'format.bookmarks-in-store\' config '
3732 b'ignoring enabled \'format.bookmarks-in-store\' config '
3732 b'because it is incompatible with disabled '
3733 b'because it is incompatible with disabled '
3733 b'\'format.usestore\' config\n'
3734 b'\'format.usestore\' config\n'
3734 )
3735 )
3735 )
3736 )
3736 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3737 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3737
3738
3738 if (
3739 if (
3739 requirementsmod.SHARED_REQUIREMENT in requirements
3740 requirementsmod.SHARED_REQUIREMENT in requirements
3740 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3741 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3741 ):
3742 ):
3742 raise error.Abort(
3743 raise error.Abort(
3743 _(
3744 _(
3744 b"cannot create shared repository as source was created"
3745 b"cannot create shared repository as source was created"
3745 b" with 'format.usestore' config disabled"
3746 b" with 'format.usestore' config disabled"
3746 )
3747 )
3747 )
3748 )
3748
3749
3749 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3750 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3750 if ui.hasconfig(b'format', b'use-share-safe'):
3751 if ui.hasconfig(b'format', b'use-share-safe'):
3751 msg = _(
3752 msg = _(
3752 b"ignoring enabled 'format.use-share-safe' config because "
3753 b"ignoring enabled 'format.use-share-safe' config because "
3753 b"it is incompatible with disabled 'format.usestore'"
3754 b"it is incompatible with disabled 'format.usestore'"
3754 b" config\n"
3755 b" config\n"
3755 )
3756 )
3756 ui.warn(msg)
3757 ui.warn(msg)
3757 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3758 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3758
3759
3759 return dropped
3760 return dropped
3760
3761
3761
3762
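
The intended calling pattern, as used by createrepository() further down, is to subtract the returned set from the computed requirements:

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)
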
3762 def filterknowncreateopts(ui, createopts):
3763 def filterknowncreateopts(ui, createopts):
3763 """Filters a dict of repo creation options against options that are known.
3764 """Filters a dict of repo creation options against options that are known.
3764
3765
3765 Receives a dict of repo creation options and returns a dict of those
3766 Receives a dict of repo creation options and returns a dict of those
3766 options that we don't know how to handle.
3767 options that we don't know how to handle.
3767
3768
3768 This function is called as part of repository creation. If the
3769 This function is called as part of repository creation. If the
3769 returned dict contains any items, repository creation will not
3770 returned dict contains any items, repository creation will not
3770 be allowed, as it means there was a request to create a repository
3771 be allowed, as it means there was a request to create a repository
3771 with options not recognized by loaded code.
3772 with options not recognized by loaded code.
3772
3773
3773 Extensions can wrap this function to filter out creation options
3774 Extensions can wrap this function to filter out creation options
3774 they know how to handle.
3775 they know how to handle.
3775 """
3776 """
3776 known = {
3777 known = {
3777 b'backend',
3778 b'backend',
3778 b'lfs',
3779 b'lfs',
3779 b'narrowfiles',
3780 b'narrowfiles',
3780 b'sharedrepo',
3781 b'sharedrepo',
3781 b'sharedrelative',
3782 b'sharedrelative',
3782 b'shareditems',
3783 b'shareditems',
3783 b'shallowfilestore',
3784 b'shallowfilestore',
3784 }
3785 }
3785
3786
3786 return {k: v for k, v in createopts.items() if k not in known}
3787 return {k: v for k, v in createopts.items() if k not in known}
3787
3788
3788
3789
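
A hedged sketch of the extension hook described in the docstring; the option name b'myoption' is invented for the example:

    from mercurial import extensions, localrepo

    def _filterknown(orig, ui, createopts):
        unknown = orig(ui, createopts)
        # claim b'myoption' (hypothetical) so creation is not rejected
        unknown.pop(b'myoption', None)
        return unknown

    def uisetup(ui):
        extensions.wrapfunction(
            localrepo, 'filterknowncreateopts', _filterknown
        )
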
3789 def createrepository(ui, path, createopts=None, requirements=None):
3790 def createrepository(ui, path, createopts=None, requirements=None):
3790 """Create a new repository in a vfs.
3791 """Create a new repository in a vfs.
3791
3792
3792 ``path`` path to the new repo's working directory.
3793 ``path`` path to the new repo's working directory.
3793 ``createopts`` options for the new repository.
3794 ``createopts`` options for the new repository.
3794 ``requirements`` predefined set of requirements.
3795 ``requirements`` predefined set of requirements.
3795 (incompatible with ``createopts``)
3796 (incompatible with ``createopts``)
3796
3797
3797 The following keys for ``createopts`` are recognized:
3798 The following keys for ``createopts`` are recognized:
3798
3799
3799 backend
3800 backend
3800 The storage backend to use.
3801 The storage backend to use.
3801 lfs
3802 lfs
3802 Repository will be created with ``lfs`` requirement. The lfs extension
3803 Repository will be created with ``lfs`` requirement. The lfs extension
3803 will automatically be loaded when the repository is accessed.
3804 will automatically be loaded when the repository is accessed.
3804 narrowfiles
3805 narrowfiles
3805 Set up repository to support narrow file storage.
3806 Set up repository to support narrow file storage.
3806 sharedrepo
3807 sharedrepo
3807 Repository object from which storage should be shared.
3808 Repository object from which storage should be shared.
3808 sharedrelative
3809 sharedrelative
3809 Boolean indicating if the path to the shared repo should be
3810 Boolean indicating if the path to the shared repo should be
3810 stored as relative. By default, the pointer to the "parent" repo
3811 stored as relative. By default, the pointer to the "parent" repo
3811 is stored as an absolute path.
3812 is stored as an absolute path.
3812 shareditems
3813 shareditems
3813 Set of items to share to the new repository (in addition to storage).
3814 Set of items to share to the new repository (in addition to storage).
3814 shallowfilestore
3815 shallowfilestore
3815 Indicates that storage for files should be shallow (not all ancestor
3816 Indicates that storage for files should be shallow (not all ancestor
3816 revisions are known).
3817 revisions are known).
3817 """
3818 """
3818
3819
3819 if requirements is not None:
3820 if requirements is not None:
3820 if createopts is not None:
3821 if createopts is not None:
3821 msg = b'cannot specify both createopts and requirements'
3822 msg = b'cannot specify both createopts and requirements'
3822 raise error.ProgrammingError(msg)
3823 raise error.ProgrammingError(msg)
3823 createopts = {}
3824 createopts = {}
3824 else:
3825 else:
3825 createopts = defaultcreateopts(ui, createopts=createopts)
3826 createopts = defaultcreateopts(ui, createopts=createopts)
3826
3827
3827 unknownopts = filterknowncreateopts(ui, createopts)
3828 unknownopts = filterknowncreateopts(ui, createopts)
3828
3829
3829 if not isinstance(unknownopts, dict):
3830 if not isinstance(unknownopts, dict):
3830 raise error.ProgrammingError(
3831 raise error.ProgrammingError(
3831 b'filterknowncreateopts() did not return a dict'
3832 b'filterknowncreateopts() did not return a dict'
3832 )
3833 )
3833
3834
3834 if unknownopts:
3835 if unknownopts:
3835 raise error.Abort(
3836 raise error.Abort(
3836 _(
3837 _(
3837 b'unable to create repository because of unknown '
3838 b'unable to create repository because of unknown '
3838 b'creation option: %s'
3839 b'creation option: %s'
3839 )
3840 )
3840 % b', '.join(sorted(unknownopts)),
3841 % b', '.join(sorted(unknownopts)),
3841 hint=_(b'is a required extension not loaded?'),
3842 hint=_(b'is a required extension not loaded?'),
3842 )
3843 )
3843
3844
3844 requirements = newreporequirements(ui, createopts=createopts)
3845 requirements = newreporequirements(ui, createopts=createopts)
3845 requirements -= checkrequirementscompat(ui, requirements)
3846 requirements -= checkrequirementscompat(ui, requirements)
3846
3847
3847 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3848 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3848
3849
3849 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3850 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3850 if hgvfs.exists():
3851 if hgvfs.exists():
3851 raise error.RepoError(_(b'repository %s already exists') % path)
3852 raise error.RepoError(_(b'repository %s already exists') % path)
3852
3853
3853 if b'sharedrepo' in createopts:
3854 if b'sharedrepo' in createopts:
3854 sharedpath = createopts[b'sharedrepo'].sharedpath
3855 sharedpath = createopts[b'sharedrepo'].sharedpath
3855
3856
3856 if createopts.get(b'sharedrelative'):
3857 if createopts.get(b'sharedrelative'):
3857 try:
3858 try:
3858 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3859 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3859 sharedpath = util.pconvert(sharedpath)
3860 sharedpath = util.pconvert(sharedpath)
3860 except (IOError, ValueError) as e:
3861 except (IOError, ValueError) as e:
3861 # ValueError is raised on Windows if the drive letters differ
3862 # ValueError is raised on Windows if the drive letters differ
3862 # on each path.
3863 # on each path.
3863 raise error.Abort(
3864 raise error.Abort(
3864 _(b'cannot calculate relative path'),
3865 _(b'cannot calculate relative path'),
3865 hint=stringutil.forcebytestr(e),
3866 hint=stringutil.forcebytestr(e),
3866 )
3867 )
3867
3868
3868 if not wdirvfs.exists():
3869 if not wdirvfs.exists():
3869 wdirvfs.makedirs()
3870 wdirvfs.makedirs()
3870
3871
3871 hgvfs.makedir(notindexed=True)
3872 hgvfs.makedir(notindexed=True)
3872 if b'sharedrepo' not in createopts:
3873 if b'sharedrepo' not in createopts:
3873 hgvfs.mkdir(b'cache')
3874 hgvfs.mkdir(b'cache')
3874 hgvfs.mkdir(b'wcache')
3875 hgvfs.mkdir(b'wcache')
3875
3876
3876 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3877 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3877 if has_store and b'sharedrepo' not in createopts:
3878 if has_store and b'sharedrepo' not in createopts:
3878 hgvfs.mkdir(b'store')
3879 hgvfs.mkdir(b'store')
3879
3880
3880 # We create an invalid changelog outside the store so very old
3881 # We create an invalid changelog outside the store so very old
3881 # Mercurial versions (which didn't know about the requirements
3882 # Mercurial versions (which didn't know about the requirements
3882 # file) encounter an error on reading the changelog. This
3883 # file) encounter an error on reading the changelog. This
3883 # effectively locks out old clients and prevents them from
3884 # effectively locks out old clients and prevents them from
3884 # mucking with a repo in an unknown format.
3885 # mucking with a repo in an unknown format.
3885 #
3886 #
3886 # The revlog header has version 65535, which won't be recognized by
3887 # The revlog header has version 65535, which won't be recognized by
3887 # such old clients.
3888 # such old clients.
3888 hgvfs.append(
3889 hgvfs.append(
3889 b'00changelog.i',
3890 b'00changelog.i',
3890 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3891 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3891 b'layout',
3892 b'layout',
3892 )
3893 )
3893
3894
3894 # Filter the requirements into working copy and store ones
3895 # Filter the requirements into working copy and store ones
3895 wcreq, storereq = scmutil.filterrequirements(requirements)
3896 wcreq, storereq = scmutil.filterrequirements(requirements)
3896 # write working copy ones
3897 # write working copy ones
3897 scmutil.writerequires(hgvfs, wcreq)
3898 scmutil.writerequires(hgvfs, wcreq)
3898 # If there are store requirements and the current repository
3899 # If there are store requirements and the current repository
3899 # is not a shared one, write the store requirements.
3900 # is not a shared one, write the store requirements.
3900 # For a new shared repository, we don't need to write the store
3901 # For a new shared repository, we don't need to write the store
3901 # requirements as they are already present in the shared store's requires
3902 # requirements as they are already present in the shared store's requires
3902 if storereq and b'sharedrepo' not in createopts:
3903 if storereq and b'sharedrepo' not in createopts:
3903 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3904 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3904 scmutil.writerequires(storevfs, storereq)
3905 scmutil.writerequires(storevfs, storereq)
3905
3906
3906 # Write out file telling readers where to find the shared store.
3907 # Write out file telling readers where to find the shared store.
3907 if b'sharedrepo' in createopts:
3908 if b'sharedrepo' in createopts:
3908 hgvfs.write(b'sharedpath', sharedpath)
3909 hgvfs.write(b'sharedpath', sharedpath)
3909
3910
3910 if createopts.get(b'shareditems'):
3911 if createopts.get(b'shareditems'):
3911 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3912 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3912 hgvfs.write(b'shared', shared)
3913 hgvfs.write(b'shared', shared)
3913
3914
3914
3915
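
End to end, creating and then opening a repository from API code might look like the following sketch (path illustrative):

    createrepository(ui, b'/tmp/newrepo')  # uses defaultcreateopts()
    repo = instance(ui, b'/tmp/newrepo', create=False)
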
3915 def poisonrepository(repo):
3916 def poisonrepository(repo):
3916 """Poison a repository instance so it can no longer be used."""
3917 """Poison a repository instance so it can no longer be used."""
3917 # Perform any cleanup on the instance.
3918 # Perform any cleanup on the instance.
3918 repo.close()
3919 repo.close()
3919
3920
3920 # Our strategy is to replace the type of the object with one that
3921 # Our strategy is to replace the type of the object with one that
3921 # has all attribute lookups result in error.
3922 # has all attribute lookups result in error.
3922 #
3923 #
3923 # But we have to allow the close() method because some constructors
3924 # But we have to allow the close() method because some constructors
3924 # of repos call close() on repo references.
3925 # of repos call close() on repo references.
3925 class poisonedrepository:
3926 class poisonedrepository:
3926 def __getattribute__(self, item):
3927 def __getattribute__(self, item):
3927 if item == 'close':
3928 if item == 'close':
3928 return object.__getattribute__(self, item)
3929 return object.__getattribute__(self, item)
3929
3930
3930 raise error.ProgrammingError(
3931 raise error.ProgrammingError(
3931 b'repo instances should not be used after unshare'
3932 b'repo instances should not be used after unshare'
3932 )
3933 )
3933
3934
3934 def close(self):
3935 def close(self):
3935 pass
3936 pass
3936
3937
3937 # We may have a repoview, which intercepts __setattr__. So be sure
3938 # We may have a repoview, which intercepts __setattr__. So be sure
3938 # we operate at the lowest level possible.
3939 # we operate at the lowest level possible.
3939 object.__setattr__(repo, '__class__', poisonedrepository)
3940 object.__setattr__(repo, '__class__', poisonedrepository)
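
The observable effect, sketched:

    poisonrepository(repo)
    repo.close()     # still permitted, and a no-op
    repo.changelog   # raises error.ProgrammingError
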
@@ -1,3322 +1,3342 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 # coding: utf8
2 # coding: utf8
3 #
3 #
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Storage back-end for Mercurial.
9 """Storage back-end for Mercurial.
10
10
11 This provides efficient delta storage with O(1) retrieve and append
11 This provides efficient delta storage with O(1) retrieve and append
12 and O(changes) merge between branches.
12 and O(changes) merge between branches.
13 """
13 """
14
14
15
15
16 import binascii
16 import binascii
17 import collections
17 import collections
18 import contextlib
18 import contextlib
19 import errno
19 import errno
20 import io
20 import io
21 import os
21 import os
22 import struct
22 import struct
23 import zlib
23 import zlib
24
24
25 # import stuff from node for others to import from revlog
25 # import stuff from node for others to import from revlog
26 from .node import (
26 from .node import (
27 bin,
27 bin,
28 hex,
28 hex,
29 nullrev,
29 nullrev,
30 sha1nodeconstants,
30 sha1nodeconstants,
31 short,
31 short,
32 wdirrev,
32 wdirrev,
33 )
33 )
34 from .i18n import _
34 from .i18n import _
35 from .pycompat import getattr
35 from .pycompat import getattr
36 from .revlogutils.constants import (
36 from .revlogutils.constants import (
37 ALL_KINDS,
37 ALL_KINDS,
38 CHANGELOGV2,
38 CHANGELOGV2,
39 COMP_MODE_DEFAULT,
39 COMP_MODE_DEFAULT,
40 COMP_MODE_INLINE,
40 COMP_MODE_INLINE,
41 COMP_MODE_PLAIN,
41 COMP_MODE_PLAIN,
42 ENTRY_RANK,
42 ENTRY_RANK,
43 FEATURES_BY_VERSION,
43 FEATURES_BY_VERSION,
44 FLAG_GENERALDELTA,
44 FLAG_GENERALDELTA,
45 FLAG_INLINE_DATA,
45 FLAG_INLINE_DATA,
46 INDEX_HEADER,
46 INDEX_HEADER,
47 KIND_CHANGELOG,
47 KIND_CHANGELOG,
48 RANK_UNKNOWN,
48 RANK_UNKNOWN,
49 REVLOGV0,
49 REVLOGV0,
50 REVLOGV1,
50 REVLOGV1,
51 REVLOGV1_FLAGS,
51 REVLOGV1_FLAGS,
52 REVLOGV2,
52 REVLOGV2,
53 REVLOGV2_FLAGS,
53 REVLOGV2_FLAGS,
54 REVLOG_DEFAULT_FLAGS,
54 REVLOG_DEFAULT_FLAGS,
55 REVLOG_DEFAULT_FORMAT,
55 REVLOG_DEFAULT_FORMAT,
56 REVLOG_DEFAULT_VERSION,
56 REVLOG_DEFAULT_VERSION,
57 SUPPORTED_FLAGS,
57 SUPPORTED_FLAGS,
58 )
58 )
59 from .revlogutils.flagutil import (
59 from .revlogutils.flagutil import (
60 REVIDX_DEFAULT_FLAGS,
60 REVIDX_DEFAULT_FLAGS,
61 REVIDX_ELLIPSIS,
61 REVIDX_ELLIPSIS,
62 REVIDX_EXTSTORED,
62 REVIDX_EXTSTORED,
63 REVIDX_FLAGS_ORDER,
63 REVIDX_FLAGS_ORDER,
64 REVIDX_HASCOPIESINFO,
64 REVIDX_HASCOPIESINFO,
65 REVIDX_ISCENSORED,
65 REVIDX_ISCENSORED,
66 REVIDX_RAWTEXT_CHANGING_FLAGS,
66 REVIDX_RAWTEXT_CHANGING_FLAGS,
67 )
67 )
68 from .thirdparty import attr
68 from .thirdparty import attr
69 from . import (
69 from . import (
70 ancestor,
70 ancestor,
71 dagop,
71 dagop,
72 error,
72 error,
73 mdiff,
73 mdiff,
74 policy,
74 policy,
75 pycompat,
75 pycompat,
76 revlogutils,
76 revlogutils,
77 templatefilters,
77 templatefilters,
78 util,
78 util,
79 )
79 )
80 from .interfaces import (
80 from .interfaces import (
81 repository,
81 repository,
82 util as interfaceutil,
82 util as interfaceutil,
83 )
83 )
84 from .revlogutils import (
84 from .revlogutils import (
85 deltas as deltautil,
85 deltas as deltautil,
86 docket as docketutil,
86 docket as docketutil,
87 flagutil,
87 flagutil,
88 nodemap as nodemaputil,
88 nodemap as nodemaputil,
89 randomaccessfile,
89 randomaccessfile,
90 revlogv0,
90 revlogv0,
91 rewrite,
91 rewrite,
92 sidedata as sidedatautil,
92 sidedata as sidedatautil,
93 )
93 )
94 from .utils import (
94 from .utils import (
95 storageutil,
95 storageutil,
96 stringutil,
96 stringutil,
97 )
97 )
98
98
99 # blanked usage of all the names to prevent pyflakes constraints
99 # blanked usage of all the names to prevent pyflakes constraints
100 # We need these names available in the module for extensions.
100 # We need these names available in the module for extensions.
101
101
102 REVLOGV0
102 REVLOGV0
103 REVLOGV1
103 REVLOGV1
104 REVLOGV2
104 REVLOGV2
105 CHANGELOGV2
105 CHANGELOGV2
106 FLAG_INLINE_DATA
106 FLAG_INLINE_DATA
107 FLAG_GENERALDELTA
107 FLAG_GENERALDELTA
108 REVLOG_DEFAULT_FLAGS
108 REVLOG_DEFAULT_FLAGS
109 REVLOG_DEFAULT_FORMAT
109 REVLOG_DEFAULT_FORMAT
110 REVLOG_DEFAULT_VERSION
110 REVLOG_DEFAULT_VERSION
111 REVLOGV1_FLAGS
111 REVLOGV1_FLAGS
112 REVLOGV2_FLAGS
112 REVLOGV2_FLAGS
113 REVIDX_ISCENSORED
113 REVIDX_ISCENSORED
114 REVIDX_ELLIPSIS
114 REVIDX_ELLIPSIS
115 REVIDX_HASCOPIESINFO
115 REVIDX_HASCOPIESINFO
116 REVIDX_EXTSTORED
116 REVIDX_EXTSTORED
117 REVIDX_DEFAULT_FLAGS
117 REVIDX_DEFAULT_FLAGS
118 REVIDX_FLAGS_ORDER
118 REVIDX_FLAGS_ORDER
119 REVIDX_RAWTEXT_CHANGING_FLAGS
119 REVIDX_RAWTEXT_CHANGING_FLAGS
120
120
121 parsers = policy.importmod('parsers')
121 parsers = policy.importmod('parsers')
122 rustancestor = policy.importrust('ancestor')
122 rustancestor = policy.importrust('ancestor')
123 rustdagop = policy.importrust('dagop')
123 rustdagop = policy.importrust('dagop')
124 rustrevlog = policy.importrust('revlog')
124 rustrevlog = policy.importrust('revlog')
125
125
126 # Aliased for performance.
126 # Aliased for performance.
127 _zlibdecompress = zlib.decompress
127 _zlibdecompress = zlib.decompress
128
128
129 # max size of revlog with inline data
129 # max size of revlog with inline data
130 _maxinline = 131072
130 _maxinline = 131072
131
131
132 # Flag processors for REVIDX_ELLIPSIS.
132 # Flag processors for REVIDX_ELLIPSIS.
133 def ellipsisreadprocessor(rl, text):
133 def ellipsisreadprocessor(rl, text):
134 return text, False
134 return text, False
135
135
136
136
137 def ellipsiswriteprocessor(rl, text):
137 def ellipsiswriteprocessor(rl, text):
138 return text, False
138 return text, False
139
139
140
140
141 def ellipsisrawprocessor(rl, text):
141 def ellipsisrawprocessor(rl, text):
142 return False
142 return False
143
143
144
144
145 ellipsisprocessor = (
145 ellipsisprocessor = (
146 ellipsisreadprocessor,
146 ellipsisreadprocessor,
147 ellipsiswriteprocessor,
147 ellipsiswriteprocessor,
148 ellipsisrawprocessor,
148 ellipsisrawprocessor,
149 )
149 )
150
150
151
151
152 def _verify_revision(rl, skipflags, state, node):
152 def _verify_revision(rl, skipflags, state, node):
153 """Verify the integrity of the given revlog ``node`` while providing a hook
153 """Verify the integrity of the given revlog ``node`` while providing a hook
154 point for extensions to influence the operation."""
154 point for extensions to influence the operation."""
155 if skipflags:
155 if skipflags:
156 state[b'skipread'].add(node)
156 state[b'skipread'].add(node)
157 else:
157 else:
158 # Side-effect: read content and verify hash.
158 # Side-effect: read content and verify hash.
159 rl.revision(node)
159 rl.revision(node)
160
160
161
161
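
A sketch of the caller-side contract: ``skipflags`` carries the revision flags for which hash verification should be skipped, so a non-zero value routes the node into ``state[b'skipread']`` instead of decoding it. The masking shown here is illustrative; the real verify code computes skipflags from its own state.

    # e.g. skip hash-checking ellipsis revisions during verify
    skipflags = rl.flags(rev) & REVIDX_ELLIPSIS
    _verify_revision(rl, skipflags, state, node)
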
162 # True if a fast implementation for persistent-nodemap is available
162 # True if a fast implementation for persistent-nodemap is available
163 #
163 #
164 # We also consider we have a "fast" implementation in "pure" python because
164 # We also consider we have a "fast" implementation in "pure" python because
165 # people using pure don't really have performance considerations (and a
165 # people using pure don't really have performance considerations (and a
166 # wheelbarrow of other slowness sources)
166 # wheelbarrow of other slowness sources)
167 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
167 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
168 parsers, 'BaseIndexObject'
168 parsers, 'BaseIndexObject'
169 )
169 )
170
170
171
171
172 @interfaceutil.implementer(repository.irevisiondelta)
172 @interfaceutil.implementer(repository.irevisiondelta)
173 @attr.s(slots=True)
173 @attr.s(slots=True)
174 class revlogrevisiondelta:
174 class revlogrevisiondelta:
175 node = attr.ib()
175 node = attr.ib()
176 p1node = attr.ib()
176 p1node = attr.ib()
177 p2node = attr.ib()
177 p2node = attr.ib()
178 basenode = attr.ib()
178 basenode = attr.ib()
179 flags = attr.ib()
179 flags = attr.ib()
180 baserevisionsize = attr.ib()
180 baserevisionsize = attr.ib()
181 revision = attr.ib()
181 revision = attr.ib()
182 delta = attr.ib()
182 delta = attr.ib()
183 sidedata = attr.ib()
183 sidedata = attr.ib()
184 protocol_flags = attr.ib()
184 protocol_flags = attr.ib()
185 linknode = attr.ib(default=None)
185 linknode = attr.ib(default=None)
186
186
187
187
188 @interfaceutil.implementer(repository.iverifyproblem)
188 @interfaceutil.implementer(repository.iverifyproblem)
189 @attr.s(frozen=True)
189 @attr.s(frozen=True)
190 class revlogproblem:
190 class revlogproblem:
191 warning = attr.ib(default=None)
191 warning = attr.ib(default=None)
192 error = attr.ib(default=None)
192 error = attr.ib(default=None)
193 node = attr.ib(default=None)
193 node = attr.ib(default=None)
194
194
195
195
196 def parse_index_v1(data, inline):
196 def parse_index_v1(data, inline):
197 # call the C implementation to parse the index data
197 # call the C implementation to parse the index data
198 index, cache = parsers.parse_index2(data, inline)
198 index, cache = parsers.parse_index2(data, inline)
199 return index, cache
199 return index, cache
200
200
201
201
202 def parse_index_v2(data, inline):
202 def parse_index_v2(data, inline):
203 # call the C implementation to parse the index data
203 # call the C implementation to parse the index data
204 index, cache = parsers.parse_index2(data, inline, format=REVLOGV2)
204 index, cache = parsers.parse_index2(data, inline, format=REVLOGV2)
205 return index, cache
205 return index, cache
206
206
207
207
208 def parse_index_cl_v2(data, inline):
208 def parse_index_cl_v2(data, inline):
209 # call the C implementation to parse the index data
209 # call the C implementation to parse the index data
210 index, cache = parsers.parse_index2(data, inline, format=CHANGELOGV2)
210 index, cache = parsers.parse_index2(data, inline, format=CHANGELOGV2)
211 return index, cache
211 return index, cache
212
212
213
213
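
A simplified sketch of how these parsers line up with the on-disk version header (the real selection happens in _loadindex() with more cases):

    _parser_by_version = {
        REVLOGV1: parse_index_v1,
        REVLOGV2: parse_index_v2,
        CHANGELOGV2: parse_index_cl_v2,
    }
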
214 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
214 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
215
215
216 def parse_index_v1_nodemap(data, inline):
216 def parse_index_v1_nodemap(data, inline):
217 index, cache = parsers.parse_index_devel_nodemap(data, inline)
217 index, cache = parsers.parse_index_devel_nodemap(data, inline)
218 return index, cache
218 return index, cache
219
219
220
220
221 else:
221 else:
222 parse_index_v1_nodemap = None
222 parse_index_v1_nodemap = None
223
223
224
224
225 def parse_index_v1_mixed(data, inline):
225 def parse_index_v1_mixed(data, inline):
226 index, cache = parse_index_v1(data, inline)
226 index, cache = parse_index_v1(data, inline)
227 return rustrevlog.MixedIndex(index), cache
227 return rustrevlog.MixedIndex(index), cache
228
228
229
229
230 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
230 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
231 # signed integer)
231 # signed integer)
232 _maxentrysize = 0x7FFFFFFF
232 _maxentrysize = 0x7FFFFFFF
233
233
234 FILE_TOO_SHORT_MSG = _(
234 FILE_TOO_SHORT_MSG = _(
235 b'cannot read from revlog %s;'
235 b'cannot read from revlog %s;'
236 b' expected %d bytes from offset %d, data size is %d'
236 b' expected %d bytes from offset %d, data size is %d'
237 )
237 )
238
238
239
239
240 class revlog:
240 class revlog:
241 """
241 """
242 the underlying revision storage object
242 the underlying revision storage object
243
243
244 A revlog consists of two parts, an index and the revision data.
244 A revlog consists of two parts, an index and the revision data.
245
245
246 The index is a file with a fixed record size containing
246 The index is a file with a fixed record size containing
247 information on each revision, including its nodeid (hash), the
247 information on each revision, including its nodeid (hash), the
248 nodeids of its parents, the position and offset of its data within
248 nodeids of its parents, the position and offset of its data within
249 the data file, and the revision it's based on. Finally, each entry
249 the data file, and the revision it's based on. Finally, each entry
250 contains a linkrev entry that can serve as a pointer to external
250 contains a linkrev entry that can serve as a pointer to external
251 data.
251 data.
252
252
253 The revision data itself is a linear collection of data chunks.
253 The revision data itself is a linear collection of data chunks.
254 Each chunk represents a revision and is usually represented as a
254 Each chunk represents a revision and is usually represented as a
255 delta against the previous chunk. To bound lookup time, runs of
255 delta against the previous chunk. To bound lookup time, runs of
256 deltas are limited to about 2 times the length of the original
256 deltas are limited to about 2 times the length of the original
257 version data. This makes retrieval of a version proportional to
257 version data. This makes retrieval of a version proportional to
258 its size, or O(1) relative to the number of revisions.
258 its size, or O(1) relative to the number of revisions.
259
259
260 Both pieces of the revlog are written to in an append-only
260 Both pieces of the revlog are written to in an append-only
261 fashion, which means we never need to rewrite a file to insert or
261 fashion, which means we never need to rewrite a file to insert or
262 remove data, and can use some simple techniques to avoid the need
262 remove data, and can use some simple techniques to avoid the need
263 for locking while reading.
263 for locking while reading.
264
264
265 If checkambig, indexfile is opened with checkambig=True at
265 If checkambig, indexfile is opened with checkambig=True at
266 writing, to avoid file stat ambiguity.
266 writing, to avoid file stat ambiguity.
267
267
268 If mmaplargeindex is True, and an mmapindexthreshold is set, the
268 If mmaplargeindex is True, and an mmapindexthreshold is set, the
269 index will be mmapped rather than read if it is larger than the
269 index will be mmapped rather than read if it is larger than the
270 configured threshold.
270 configured threshold.
271
271
272 If censorable is True, the revlog can have censored revisions.
272 If censorable is True, the revlog can have censored revisions.
273
273
274 If `upperboundcomp` is not None, this is the expected maximal gain from
274 If `upperboundcomp` is not None, this is the expected maximal gain from
275 compression for the data content.
275 compression for the data content.
276
276
277 `concurrencychecker` is an optional function that receives 3 arguments: a
277 `concurrencychecker` is an optional function that receives 3 arguments: a
278 file handle, a filename, and an expected position. It should check whether
278 file handle, a filename, and an expected position. It should check whether
279 the current position in the file handle is valid, and log/warn/fail (by
279 the current position in the file handle is valid, and log/warn/fail (by
280 raising).
280 raising).
281
281
282 See mercurial/revlogutils/constants.py for details about the content of an
282 See mercurial/revlogutils/constants.py for details about the content of an
283 index entry.
283 index entry.
284 """
284 """
285
285
286 _flagserrorclass = error.RevlogError
286 _flagserrorclass = error.RevlogError
287
287
288 def __init__(
288 def __init__(
289 self,
289 self,
290 opener,
290 opener,
291 target,
291 target,
292 radix,
292 radix,
293 postfix=None, # only exist for `tmpcensored` now
293 postfix=None, # only exist for `tmpcensored` now
294 checkambig=False,
294 checkambig=False,
295 mmaplargeindex=False,
295 mmaplargeindex=False,
296 censorable=False,
296 censorable=False,
297 upperboundcomp=None,
297 upperboundcomp=None,
298 persistentnodemap=False,
298 persistentnodemap=False,
299 concurrencychecker=None,
299 concurrencychecker=None,
300 trypending=False,
300 trypending=False,
301 canonical_parent_order=True,
301 canonical_parent_order=True,
302 ):
302 ):
303 """
303 """
304 create a revlog object
304 create a revlog object
305
305
306 opener is a function that abstracts the file opening operation
306 opener is a function that abstracts the file opening operation
307 and can be used to implement COW semantics or the like.
307 and can be used to implement COW semantics or the like.
308
308
309 `target`: a (KIND, ID) tuple that identifies the content stored in
309 `target`: a (KIND, ID) tuple that identifies the content stored in
310 this revlog. It helps the rest of the code understand what the revlog
310 this revlog. It helps the rest of the code understand what the revlog
311 is about without having to resort to heuristics and index filename
311 is about without having to resort to heuristics and index filename
312 analysis. Note that this must reliably be set by normal code, but
312 analysis. Note that this must reliably be set by normal code, but
313 that test, debug, or performance measurement code might not set this to an
313 that test, debug, or performance measurement code might not set this to an
314 accurate value.
314 accurate value.
315 """
315 """
316 self.upperboundcomp = upperboundcomp
316 self.upperboundcomp = upperboundcomp
317
317
318 self.radix = radix
318 self.radix = radix
319
319
320 self._docket_file = None
320 self._docket_file = None
321 self._indexfile = None
321 self._indexfile = None
322 self._datafile = None
322 self._datafile = None
323 self._sidedatafile = None
323 self._sidedatafile = None
324 self._nodemap_file = None
324 self._nodemap_file = None
325 self.postfix = postfix
325 self.postfix = postfix
326 self._trypending = trypending
326 self._trypending = trypending
327 self.opener = opener
327 self.opener = opener
328 if persistentnodemap:
328 if persistentnodemap:
329 self._nodemap_file = nodemaputil.get_nodemap_file(self)
329 self._nodemap_file = nodemaputil.get_nodemap_file(self)
330
330
331 assert target[0] in ALL_KINDS
331 assert target[0] in ALL_KINDS
332 assert len(target) == 2
332 assert len(target) == 2
333 self.target = target
333 self.target = target
334 # When True, indexfile is opened with checkambig=True at writing, to
334 # When True, indexfile is opened with checkambig=True at writing, to
335 # avoid file stat ambiguity.
335 # avoid file stat ambiguity.
336 self._checkambig = checkambig
336 self._checkambig = checkambig
337 self._mmaplargeindex = mmaplargeindex
337 self._mmaplargeindex = mmaplargeindex
338 self._censorable = censorable
338 self._censorable = censorable
339 # 3-tuple of (node, rev, text) for a raw revision.
339 # 3-tuple of (node, rev, text) for a raw revision.
340 self._revisioncache = None
340 self._revisioncache = None
341 # Maps rev to chain base rev.
341 # Maps rev to chain base rev.
342 self._chainbasecache = util.lrucachedict(100)
342 self._chainbasecache = util.lrucachedict(100)
343 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
343 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
344 self._chunkcache = (0, b'')
344 self._chunkcache = (0, b'')
345 # How much data to read and cache into the raw revlog data cache.
345 # How much data to read and cache into the raw revlog data cache.
346 self._chunkcachesize = 65536
346 self._chunkcachesize = 65536
347 self._maxchainlen = None
347 self._maxchainlen = None
348 self._deltabothparents = True
348 self._deltabothparents = True
349 self._debug_delta = False
349 self.index = None
350 self.index = None
350 self._docket = None
351 self._docket = None
351 self._nodemap_docket = None
352 self._nodemap_docket = None
352 # Mapping of partial identifiers to full nodes.
353 # Mapping of partial identifiers to full nodes.
353 self._pcache = {}
354 self._pcache = {}
354 # Mapping of revision integer to full node.
355 # Mapping of revision integer to full node.
355 self._compengine = b'zlib'
356 self._compengine = b'zlib'
356 self._compengineopts = {}
357 self._compengineopts = {}
357 self._maxdeltachainspan = -1
358 self._maxdeltachainspan = -1
358 self._withsparseread = False
359 self._withsparseread = False
359 self._sparserevlog = False
360 self._sparserevlog = False
360 self.hassidedata = False
361 self.hassidedata = False
361 self._srdensitythreshold = 0.50
362 self._srdensitythreshold = 0.50
362 self._srmingapsize = 262144
363 self._srmingapsize = 262144
363
364
364 # Make copy of flag processors so each revlog instance can support
365 # Make copy of flag processors so each revlog instance can support
365 # custom flags.
366 # custom flags.
366 self._flagprocessors = dict(flagutil.flagprocessors)
367 self._flagprocessors = dict(flagutil.flagprocessors)
367
368
368 # 3-tuple of file handles being used for active writing.
369 # 3-tuple of file handles being used for active writing.
369 self._writinghandles = None
370 self._writinghandles = None
370 # prevent nesting of addgroup
371 # prevent nesting of addgroup
371 self._adding_group = None
372 self._adding_group = None
372
373
373 self._loadindex()
374 self._loadindex()
374
375
375 self._concurrencychecker = concurrencychecker
376 self._concurrencychecker = concurrencychecker
376
377
377 # parent order is supposed to be semantically irrelevant, so we
378 # parent order is supposed to be semantically irrelevant, so we
378 # normally resort parents to ensure that the first parent is non-null,
379 # normally resort parents to ensure that the first parent is non-null,
379 # if there is a non-null parent at all.
380 # if there is a non-null parent at all.
380 # filelog abuses the parent order as flag to mark some instances of
381 # filelog abuses the parent order as flag to mark some instances of
381 # meta-encoded files, so allow it to disable this behavior.
382 # meta-encoded files, so allow it to disable this behavior.
382 self.canonical_parent_order = canonical_parent_order
383 self.canonical_parent_order = canonical_parent_order
383
384
384 def _init_opts(self):
385 def _init_opts(self):
385 """process options (from above/config) to setup associated default revlog mode
386 """process options (from above/config) to setup associated default revlog mode
386
387
387 These values might be affected when actually reading on disk information.
388 These values might be affected when actually reading on disk information.
388
389
389 The relevant values are returned for use in _loadindex().
390 The relevant values are returned for use in _loadindex().
390
391
391 * newversionflags:
392 * newversionflags:
392 version header to use if we need to create a new revlog
393 version header to use if we need to create a new revlog
393
394
394 * mmapindexthreshold:
395 * mmapindexthreshold:
395 minimal index size at which to start using mmap
396 minimal index size at which to start using mmap
396
397
397 * force_nodemap:
398 * force_nodemap:
398 force the usage of a "development" version of the nodemap code
399 force the usage of a "development" version of the nodemap code
399 """
400 """
400 mmapindexthreshold = None
401 mmapindexthreshold = None
401 opts = self.opener.options
402 opts = self.opener.options
402
403
403 if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
404 if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
404 new_header = CHANGELOGV2
405 new_header = CHANGELOGV2
405 elif b'revlogv2' in opts:
406 elif b'revlogv2' in opts:
406 new_header = REVLOGV2
407 new_header = REVLOGV2
407 elif b'revlogv1' in opts:
408 elif b'revlogv1' in opts:
408 new_header = REVLOGV1 | FLAG_INLINE_DATA
409 new_header = REVLOGV1 | FLAG_INLINE_DATA
409 if b'generaldelta' in opts:
410 if b'generaldelta' in opts:
410 new_header |= FLAG_GENERALDELTA
411 new_header |= FLAG_GENERALDELTA
411 elif b'revlogv0' in self.opener.options:
412 elif b'revlogv0' in self.opener.options:
412 new_header = REVLOGV0
413 new_header = REVLOGV0
413 else:
414 else:
414 new_header = REVLOG_DEFAULT_VERSION
415 new_header = REVLOG_DEFAULT_VERSION
415
416
416 if b'chunkcachesize' in opts:
417 if b'chunkcachesize' in opts:
417 self._chunkcachesize = opts[b'chunkcachesize']
418 self._chunkcachesize = opts[b'chunkcachesize']
418 if b'maxchainlen' in opts:
419 if b'maxchainlen' in opts:
419 self._maxchainlen = opts[b'maxchainlen']
420 self._maxchainlen = opts[b'maxchainlen']
420 if b'deltabothparents' in opts:
421 if b'deltabothparents' in opts:
421 self._deltabothparents = opts[b'deltabothparents']
422 self._deltabothparents = opts[b'deltabothparents']
422 self._lazydelta = bool(opts.get(b'lazydelta', True))
423 self._lazydelta = bool(opts.get(b'lazydelta', True))
423 self._lazydeltabase = False
424 self._lazydeltabase = False
424 if self._lazydelta:
425 if self._lazydelta:
425 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
426 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
427 if b'debug-delta' in opts:
428 self._debug_delta = opts[b'debug-delta']
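# (new in this changeset) `debug-delta` is forwarded here from the repo
# configuration: the `debug.revlog.debug-delta` option this series
# introduces. When truthy, the delta-search code can emit extra
# diagnostics. A sketch of the expected hgrc spelling:
#
#   [debug]
#   revlog.debug-delta = yes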
426 if b'compengine' in opts:
429 if b'compengine' in opts:
427 self._compengine = opts[b'compengine']
430 self._compengine = opts[b'compengine']
428 if b'zlib.level' in opts:
431 if b'zlib.level' in opts:
429 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
432 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
430 if b'zstd.level' in opts:
433 if b'zstd.level' in opts:
431 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
434 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
432 if b'maxdeltachainspan' in opts:
435 if b'maxdeltachainspan' in opts:
433 self._maxdeltachainspan = opts[b'maxdeltachainspan']
436 self._maxdeltachainspan = opts[b'maxdeltachainspan']
434 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
437 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
435 mmapindexthreshold = opts[b'mmapindexthreshold']
438 mmapindexthreshold = opts[b'mmapindexthreshold']
436 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
439 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
437 withsparseread = bool(opts.get(b'with-sparse-read', False))
440 withsparseread = bool(opts.get(b'with-sparse-read', False))
438 # sparse-revlog forces sparse-read
441 # sparse-revlog forces sparse-read
439 self._withsparseread = self._sparserevlog or withsparseread
442 self._withsparseread = self._sparserevlog or withsparseread
440 if b'sparse-read-density-threshold' in opts:
443 if b'sparse-read-density-threshold' in opts:
441 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
444 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
442 if b'sparse-read-min-gap-size' in opts:
445 if b'sparse-read-min-gap-size' in opts:
443 self._srmingapsize = opts[b'sparse-read-min-gap-size']
446 self._srmingapsize = opts[b'sparse-read-min-gap-size']
444 if opts.get(b'enableellipsis'):
447 if opts.get(b'enableellipsis'):
445 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
448 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
446
449
447 # revlog v0 doesn't have flag processors
450 # revlog v0 doesn't have flag processors
448 for flag, processor in opts.get(b'flagprocessors', {}).items():
451 for flag, processor in opts.get(b'flagprocessors', {}).items():
449 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
452 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
450
453
451 if self._chunkcachesize <= 0:
454 if self._chunkcachesize <= 0:
452 raise error.RevlogError(
455 raise error.RevlogError(
453 _(b'revlog chunk cache size %r is not greater than 0')
456 _(b'revlog chunk cache size %r is not greater than 0')
454 % self._chunkcachesize
457 % self._chunkcachesize
455 )
458 )
456 elif self._chunkcachesize & (self._chunkcachesize - 1):
459 elif self._chunkcachesize & (self._chunkcachesize - 1):
457 raise error.RevlogError(
460 raise error.RevlogError(
458 _(b'revlog chunk cache size %r is not a power of 2')
461 _(b'revlog chunk cache size %r is not a power of 2')
459 % self._chunkcachesize
462 % self._chunkcachesize
460 )
463 )
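        # Sketch of the power-of-two test above: x & (x - 1) clears the lowest
        # set bit, so the expression is zero exactly for powers of two, e.g.
        #     16 & 15 == 0b10000 & 0b01111 == 0
        #     12 & 11 == 0b01100 & 0b01011 == 0b01000 != 0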
        force_nodemap = opts.get(b'devel-force-nodemap', False)
        return new_header, mmapindexthreshold, force_nodemap

    def _get_data(self, filepath, mmap_threshold, size=None):
        """return a file's content, with or without mmap

        If the file is missing return the empty string"""
        try:
            with self.opener(filepath) as fp:
                if mmap_threshold is not None:
                    file_size = self.opener.fstat(fp).st_size
                    if file_size >= mmap_threshold:
                        if size is not None:
                            # avoid potential mmap crash
                            size = min(file_size, size)
                        # TODO: should .close() to release resources without
                        # relying on Python GC
                        if size is None:
                            return util.buffer(util.mmapread(fp))
                        else:
                            return util.buffer(util.mmapread(fp, size))
                if size is None:
                    return fp.read()
                else:
                    return fp.read(size)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return b''

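    # Illustrative behaviour of _get_data (threshold value is an example
    # only): with mmap_threshold=1048576, a 4 MiB index comes back as an
    # mmap-backed buffer while a 4 KiB one goes through a plain fp.read(),
    # and a missing file yields b''.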
    def _loadindex(self, docket=None):

        new_header, mmapindexthreshold, force_nodemap = self._init_opts()

        if self.postfix is not None:
            entry_point = b'%s.i.%s' % (self.radix, self.postfix)
        elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
            entry_point = b'%s.i.a' % self.radix
        else:
            entry_point = b'%s.i' % self.radix

        if docket is not None:
            self._docket = docket
            self._docket_file = entry_point
        else:
            entry_data = b''
            self._initempty = True
            entry_data = self._get_data(entry_point, mmapindexthreshold)
            if len(entry_data) > 0:
                header = INDEX_HEADER.unpack(entry_data[:4])[0]
                self._initempty = False
            else:
                header = new_header

        self._format_flags = header & ~0xFFFF
        self._format_version = header & 0xFFFF
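        # Worked example, assuming the usual constants from
        # revlogutils.constants (REVLOGV1 == 1, FLAG_INLINE_DATA == 1 << 16):
        # a header of 0x00010001 splits into _format_flags == 0x00010000
        # (inline data) and _format_version == 1.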

        supported_flags = SUPPORTED_FLAGS.get(self._format_version)
        if supported_flags is None:
            msg = _(b'unknown version (%d) in revlog %s')
            msg %= (self._format_version, self.display_id)
            raise error.RevlogError(msg)
        elif self._format_flags & ~supported_flags:
            msg = _(b'unknown flags (%#04x) in version %d revlog %s')
            display_flag = self._format_flags >> 16
            msg %= (display_flag, self._format_version, self.display_id)
            raise error.RevlogError(msg)

        features = FEATURES_BY_VERSION[self._format_version]
        self._inline = features[b'inline'](self._format_flags)
        self._generaldelta = features[b'generaldelta'](self._format_flags)
        self.hassidedata = features[b'sidedata']

        if not features[b'docket']:
            self._indexfile = entry_point
            index_data = entry_data
        else:
            self._docket_file = entry_point
            if self._initempty:
                self._docket = docketutil.default_docket(self, header)
            else:
                self._docket = docketutil.parse_docket(
                    self, entry_data, use_pending=self._trypending
                )

        if self._docket is not None:
            self._indexfile = self._docket.index_filepath()
            index_data = b''
            index_size = self._docket.index_end
            if index_size > 0:
                index_data = self._get_data(
                    self._indexfile, mmapindexthreshold, size=index_size
                )
                if len(index_data) < index_size:
                    msg = _(b'too few index data for %s: got %d, expected %d')
                    msg %= (self.display_id, len(index_data), index_size)
                    raise error.RevlogError(msg)

            self._inline = False
            # generaldelta implied by version 2 revlogs.
            self._generaldelta = True
            # the logic for persistent nodemap will be dealt with within the
            # main docket, so disable it for now.
            self._nodemap_file = None

        if self._docket is not None:
            self._datafile = self._docket.data_filepath()
            self._sidedatafile = self._docket.sidedata_filepath()
        elif self.postfix is None:
            self._datafile = b'%s.d' % self.radix
        else:
            self._datafile = b'%s.d.%s' % (self.radix, self.postfix)

        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid

        # sparse-revlog can't be on without general-delta (issue6056)
        if not self._generaldelta:
            self._sparserevlog = False

        self._storedeltachains = True

        devel_nodemap = (
            self._nodemap_file
            and force_nodemap
            and parse_index_v1_nodemap is not None
        )

        use_rust_index = False
        if rustrevlog is not None:
            if self._nodemap_file is not None:
                use_rust_index = True
            else:
                use_rust_index = self.opener.options.get(b'rust.index')

        self._parse_index = parse_index_v1
        if self._format_version == REVLOGV0:
            self._parse_index = revlogv0.parse_index_v0
        elif self._format_version == REVLOGV2:
            self._parse_index = parse_index_v2
        elif self._format_version == CHANGELOGV2:
            self._parse_index = parse_index_cl_v2
        elif devel_nodemap:
            self._parse_index = parse_index_v1_nodemap
        elif use_rust_index:
            self._parse_index = parse_index_v1_mixed
        try:
            d = self._parse_index(index_data, self._inline)
            index, chunkcache = d
            use_nodemap = (
                not self._inline
                and self._nodemap_file is not None
                and util.safehasattr(index, 'update_nodemap_data')
            )
            if use_nodemap:
                nodemap_data = nodemaputil.persisted_data(self)
                if nodemap_data is not None:
                    docket = nodemap_data[0]
                    if (
                        len(d[0]) > docket.tip_rev
                        and d[0][docket.tip_rev][7] == docket.tip_node
                    ):
                        # no changelog tampering
                        self._nodemap_docket = docket
                        index.update_nodemap_data(*nodemap_data)
        except (ValueError, IndexError):
            raise error.RevlogError(
                _(b"index %s is corrupted") % self.display_id
            )
        self.index = index
        self._segmentfile = randomaccessfile.randomaccessfile(
            self.opener,
            (self._indexfile if self._inline else self._datafile),
            self._chunkcachesize,
            chunkcache,
        )
        self._segmentfile_sidedata = randomaccessfile.randomaccessfile(
            self.opener,
            self._sidedatafile,
            self._chunkcachesize,
        )
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = util.lrucachedict(500)
        # revlog header -> revlog compressor
        self._decompressors = {}

    @util.propertycache
    def revlog_kind(self):
        return self.target[0]

    @util.propertycache
    def display_id(self):
        """The public facing "ID" of the revlog that we use in messages"""
        # Maybe we should build a user facing representation of
        # revlog.target instead of using `self.radix`
        return self.radix

    def _get_decompressor(self, t):
        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor(self._compengineopts)
                self._decompressors[t] = compressor
            except KeyError:
                raise error.RevlogError(
                    _(b'unknown compression type %s') % binascii.hexlify(t)
                )
        return compressor

    @util.propertycache
    def _compressor(self):
        engine = util.compengines[self._compengine]
        return engine.revlogcompressor(self._compengineopts)

    @util.propertycache
    def _decompressor(self):
        """the default decompressor"""
        if self._docket is None:
            return None
        t = self._docket.default_compression_header
        c = self._get_decompressor(t)
        return c.decompress

    def _indexfp(self):
        """file object for the revlog's index file"""
        return self.opener(self._indexfile, mode=b"r")

    def __index_write_fp(self):
        # You should not use this directly; use `_writing` instead
        try:
            f = self.opener(
                self._indexfile, mode=b"r+", checkambig=self._checkambig
            )
            if self._docket is None:
                f.seek(0, os.SEEK_END)
            else:
                f.seek(self._docket.index_end, os.SEEK_SET)
            return f
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return self.opener(
                self._indexfile, mode=b"w+", checkambig=self._checkambig
            )

    def __index_new_fp(self):
        # You should not use this unless you are upgrading from an inline
        # revlog
        return self.opener(
            self._indexfile,
            mode=b"w",
            checkambig=self._checkambig,
            atomictemp=True,
        )

    def _datafp(self, mode=b'r'):
        """file object for the revlog's data file"""
        return self.opener(self._datafile, mode=mode)

    @contextlib.contextmanager
    def _sidedatareadfp(self):
        """file object suitable to read sidedata"""
        if self._writinghandles:
            yield self._writinghandles[2]
        else:
            with self.opener(self._sidedatafile) as fp:
                yield fp

    def tiprev(self):
        return len(self.index) - 1

    def tip(self):
        return self.node(self.tiprev())

    def __contains__(self, rev):
        return 0 <= rev < len(self)

    def __len__(self):
        return len(self.index)

    def __iter__(self):
        return iter(pycompat.xrange(len(self)))

    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        return storageutil.iterrevs(len(self), start=start, stop=stop)

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def candelta(self, baserev, rev):
        """whether two revisions (baserev, rev) can be delta-ed or not"""
        # Disable delta if either rev requires a content-changing flag
        # processor (ex. LFS). This is because such a flag processor can alter
        # the rawtext content that the delta will be based on, and two clients
        # could have the same revlog node with different flags (i.e. different
        # rawtext contents) and the delta could be incompatible.
        if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
            self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
        ):
            return False
        return True

    def update_caches(self, transaction):
        if self._nodemap_file is not None:
            if transaction is None:
                nodemaputil.update_persistent_nodemap(self)
            else:
                nodemaputil.setup_persistent_nodemap(transaction, self)

    def clearcaches(self):
        self._revisioncache = None
        self._chainbasecache.clear()
        self._segmentfile.clear_cache()
        self._segmentfile_sidedata.clear_cache()
        self._pcache = {}
        self._nodemap_docket = None
        self.index.clearcaches()
        # The Python code is responsible for validating the docket, so we
        # end up having to refresh it here.
        use_nodemap = (
            not self._inline
            and self._nodemap_file is not None
            and util.safehasattr(self.index, 'update_nodemap_data')
        )
        if use_nodemap:
            nodemap_data = nodemaputil.persisted_data(self)
            if nodemap_data is not None:
                self._nodemap_docket = nodemap_data[0]
                self.index.update_nodemap_data(*nodemap_data)

    def rev(self, node):
        try:
            return self.index.rev(node)
        except TypeError:
            raise
        except error.RevlogError:
            # parsers.c radix tree lookup failed
            if (
                node == self.nodeconstants.wdirid
                or node in self.nodeconstants.wdirfilenodeids
            ):
                raise error.WdirUnsupported
            raise error.LookupError(node, self.display_id, _(b'no node'))

    # Accessors for index entries.

    # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
    # are flags.
    def start(self, rev):
        return int(self.index[rev][0] >> 16)

    def sidedata_cut_off(self, rev):
        sd_cut_off = self.index[rev][8]
        if sd_cut_off != 0:
            return sd_cut_off
        # This is some annoying dance, because entries without sidedata
        # currently use 0 as their offset (instead of previous-offset +
        # previous-size).
        #
        # We should reconsider this sidedata → 0 sidedata_offset policy.
        # In the meantime, we need this.
        while 0 <= rev:
            e = self.index[rev]
            if e[9] != 0:
                return e[8] + e[9]
            rev -= 1
        return 0

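    # Illustrative scan (offsets made up): if rev 5 stores (offset=0, size=0)
    # for its sidedata, the loop above walks 5, 4, 3, ... until it hits an
    # entry with a non-zero sidedata size and returns that entry's
    # offset + size as the cut-off.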
    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF

    def length(self, rev):
        return self.index[rev][1]

    def sidedata_length(self, rev):
        if not self.hassidedata:
            return 0
        return self.index[rev][9]

    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.rawdata(rev)
        return len(t)

    def size(self, rev):
        """length of non-raw text (processed by a "read" flag processor)"""
        # fast path: if no "read" flag processor could change the content,
        # size is rawsize. note: ELLIPSIS is known to not change the content.
        flags = self.flags(rev)
        if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
            return self.rawsize(rev)

        return len(self.revision(rev))

    def fast_rank(self, rev):
        """Return the rank of a revision if already known, or None otherwise.

        The rank of a revision is the size of the sub-graph it defines as a
        head. Equivalently, the rank of a revision `r` is the size of the set
        `ancestors(r)`, `r` included.

        This method returns the rank retrieved from the revlog in constant
        time. It makes no attempt at computing unknown values for versions of
        the revlog which do not persist the rank.
        """
        rank = self.index[rev][ENTRY_RANK]
        if self._format_version != CHANGELOGV2 or rank == RANK_UNKNOWN:
            return None
        if rev == nullrev:
            return 0  # convention
        return rank

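    # Small illustration of "rank": in a linear history 0 <- 1 <- 2, the rank
    # of rev 2 is 3 since ancestors(2) = {0, 1, 2}; fast_rank returns it only
    # when a CHANGELOGV2 index has the value persisted, and None otherwise.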
    def chainbase(self, rev):
        base = self._chainbasecache.get(rev)
        if base is not None:
            return base

        index = self.index
        iterrev = rev
        base = index[iterrev][3]
        while base != iterrev:
            iterrev = base
            base = index[iterrev][3]

        self._chainbasecache[rev] = base
        return base

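    # In the index tuples above, entry[3] is the revision this delta is based
    # on; a revision stored as a full snapshot points at itself, which is the
    # condition (base == iterrev) that terminates the walk.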
    def linkrev(self, rev):
        return self.index[rev][4]

    def parentrevs(self, rev):
        try:
            entry = self.index[rev]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

        if self.canonical_parent_order and entry[5] == nullrev:
            return entry[6], entry[5]
        else:
            return entry[5], entry[6]

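    # Note on the branch above: with canonical_parent_order set and a null
    # first parent, a stored (null, p2) pair is presented as (p2, null), so
    # callers always see the real parent first.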
    # fast parentrevs(rev) where rev isn't filtered
    _uncheckedparentrevs = parentrevs

    def node(self, rev):
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    # Derived from index values.

    def end(self, rev):
        return self.start(rev) + self.length(rev)

    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        # inline node() to avoid function call overhead
        if self.canonical_parent_order and d[5] == nullrev:
            return i[d[6]][7], i[d[5]][7]
        else:
            return i[d[5]][7], i[d[6]][7]

    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r

    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped

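    # Example walk (revision numbers made up): if rev 7 deltas against rev 5,
    # which deltas against the full snapshot rev 2, _deltachain(7) returns
    # ([2, 5, 7], False) while _deltachain(7, stoprev=5) returns ([7], True),
    # since the walk stopped at 5 without including it.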
    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse revision order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        # first, make sure start revisions aren't filtered
        revs = list(revs)
        checkrev = self.node
        for r in revs:
            checkrev(r)
        # and we're sure ancestors aren't filtered as well

        if rustancestor is not None and self.index.rust_ext_compat:
            lazyancestors = rustancestor.LazyAncestors
            arg = self.index
        else:
            lazyancestors = ancestor.lazyancestors
            arg = self._uncheckedparentrevs
        return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)

    def descendants(self, revs):
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [self.nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset:
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if r not in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]

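    # Revset-style example (linear history 0..4): with common=[node(2)] and
    # heads=[node(4)], 'has' covers ::2 (plus nullrev) and the missing list is
    # [node(3), node(4)], i.e. (::4) - (::2), sorted by revision.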
    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        if rustancestor is not None and self.index.rust_ext_compat:
            return rustancestor.MissingAncestors(self.index, common)
        return ancestor.incrementalmissingancestors(self.parentrevs, common)

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [self.nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses the list of all of the revlog's heads."""
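        # Illustrative call (tiny linear graph 0 <- 1 <- 2): with
        # roots=[node(1)] and heads=[node(2)], 'nodes' is [node(1), node(2)],
        # 'outroots' is [node(1)] and 'outheads' is [node(2)].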
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [self.nullid]  # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return (
                [self.node(r) for r in self],
                [self.nullid],
                list(self.heads()),
            )
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == self.nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n)  # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update(
                            [p for p in self.parents(n) if p != self.nullid]
                        )
                    elif n in heads:  # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [self.nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all of the
                # roots start out marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants. (We seeded the descendants set with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in heads.items() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

    def headrevs(self, revs=None):
        if revs is None:
            try:
                return self.index.headrevs()
            except AttributeError:
                return self._headrevs()
        if rustdagop is not None and self.index.rust_ext_compat:
            return rustdagop.headrevs(self.index, revs)
        return dagop.headrevs(revs, self._uncheckedparentrevs)

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iterate over filtered revs, so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1  # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0  # my parents are not
        return [r for r, val in enumerate(ishead) if val]

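    # A minimal sketch of the same marking scheme on a toy DAG, using a
    # plain list of (p1, p2) pairs instead of a revlog index (illustrative
    # only, not part of the revlog API):
    #
    #   parents = [(-1, -1), (0, -1), (1, -1), (1, -1)]  # revs 0..3
    #   ishead = [1] * (len(parents) + 1)  # extra slot absorbs nullrev (-1)
    #   for p1, p2 in parents:
    #       ishead[p1] = ishead[p2] = 0
    #   [r for r in range(len(parents)) if ishead[r]]  # -> [2, 3]
    #
    # Revisions 2 and 3 survive because no later revision names them as a
    # parent.
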
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [self.nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullrev
        else:
            start = self.rev(start)

        stoprevs = {self.rev(n) for n in stop or []}

        revs = dagop.headrevssubset(
            self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
        )

        return [self.node(rev) for rev in revs]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        ancs = self._commonancestorsheads(a, b)
        return pycompat.maplist(self.node, ancs)

    def _commonancestorsheads(self, *revs):
        """calculate all the heads of the common ancestors of revs"""
        try:
            ancs = self.index.commonancestorsheads(*revs)
        except (AttributeError, OverflowError):  # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
        return ancs

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        A revision is considered an ancestor of itself."""
        a, b = self.rev(a), self.rev(b)
        return self.isancestorrev(a, b)

    def isancestorrev(self, a, b):
        """return True if revision a is an ancestor of revision b

        A revision is considered an ancestor of itself.

        The implementation of this is trivial but the use of
        reachableroots is not."""
        if a == nullrev:
            return True
        elif a == b:
            return True
        elif a > b:
            return False
        return bool(self.reachableroots(a, [b], [a], includepath=False))

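    # Worked example (illustrative): in a linear history 0 <- 1 <- 2,
    # isancestorrev(0, 2) falls through to the reachableroots path:
    #
    #   self.reachableroots(0, [2], [0])  # -> [0]; non-empty, so True
    #
    # while isancestorrev(2, 0) short-circuits on ``a > b``: a revision
    # number is always higher than those of all its ancestors.
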
    def reachableroots(self, minroot, heads, roots, includepath=False):
        """return (heads(::(<roots> and <roots>::<heads>)))

        If includepath is True, return (<roots>::<heads>)."""
        try:
            return self.index.reachableroots2(
                minroot, heads, roots, includepath
            )
        except AttributeError:
            return dagop._reachablerootspure(
                self.parentrevs, minroot, roots, heads, includepath
            )

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return self.nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == self.nodeconstants.nodelen:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node)  # quick search the index
                return node
            except error.LookupError:
                pass  # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if b"%d" % rev != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 2 * self.nodeconstants.nodelen:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, error.LookupError):
                pass

    def _partialmatch(self, id):
        # we don't care about wdirfilenodeids as they should always be full
        # hashes
        maybewdir = self.nodeconstants.wdirhex.startswith(id)
        ambiguous = False
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    ambiguous = True
                else:
                    return partial
            elif maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            else:
                return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                ambiguous = True
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass
        if ambiguous:
            raise error.AmbiguousPrefixLookupError(
                id, self.display_id, _(b'ambiguous identifier')
            )

        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2  # grab an even number of digits
                prefix = bin(id[: l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [
                    n for n in nl if hex(n).startswith(id) and self.hasnode(n)
                ]
                if self.nodeconstants.nullhex.startswith(id):
                    nl.append(self.nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.display_id, _(b'ambiguous identifier')
                    )
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
        - revision number or str(revision number)
        - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise error.LookupError(id, self.display_id, _(b'no match found'))

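    # Illustrative call shapes (hypothetical revlog ``rl`` and hashes, not
    # part of the revlog API):
    #
    #   rl.lookup(0)        # revision number
    #   rl.lookup(b'0')     # str(revision number)
    #   rl.lookup(b'-1')    # negative numbers count back from the tip
    #   rl.lookup(b'1de3')  # unambiguous hex prefix of a nodeid
    #
    # An ambiguous prefix raises AmbiguousPrefixLookupError, and an
    # identifier with no match at all raises LookupError.
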
    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""

        def isvalid(prefix):
            try:
                matchednode = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if matchednode is None:
                raise error.LookupError(node, self.display_id, _(b'no node'))
            return True

        def maybewdir(prefix):
            return all(c == b'f' for c in pycompat.iterbytestr(prefix))

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != self.nodeconstants.wdirid:
                    raise error.LookupError(
                        node, self.display_id, _(b'no node')
                    )
            except AttributeError:
                # Fall through to pure code
                pass

        if node == self.nodeconstants.wdirid:
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)

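    # Illustrative behaviour (hypothetical hashes): if only one node starts
    # with b'd3', shortest(node) can return b'd3'; if a second node shares
    # that prefix, isvalid() sees AmbiguousPrefixLookupError and the loop
    # grows the prefix (b'd3a', ...) until it becomes unique. Prefixes made
    # only of 'f' are then extended by disambiguate(), since they could
    # also name the virtual working-directory id.
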
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """
        p1, p2 = self.parents(node)
        return storageutil.hashrevisionsha1(text, p1, p2) != node

    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            start += (startrev + 1) * self.index.entry_size
            end += (endrev + 1) * self.index.entry_size
        length = end - start

        return start, self._segmentfile.read_chunk(start, length, df)

    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        compression_mode = self.index[rev][10]
        data = self._getsegmentforrevs(rev, rev, df=df)[1]
        if compression_mode == COMP_MODE_PLAIN:
            return data
        elif compression_mode == COMP_MODE_DEFAULT:
            return self._decompressor(data)
        elif compression_mode == COMP_MODE_INLINE:
            return self.decompress(data)
        else:
            msg = b'unknown compression mode %d'
            msg %= compression_mode
            raise error.RevlogError(msg)

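    # Dispatch summary (descriptive, mirroring the branches above): the
    # per-revision compression mode stored in the index decides how a chunk
    # is decoded, so no header sniffing is needed here:
    #
    #   COMP_MODE_PLAIN   -> stored verbatim, returned as-is
    #   COMP_MODE_DEFAULT -> compressed with the revlog-wide default engine,
    #                        decoded by self._decompressor
    #   COMP_MODE_INLINE  -> the chunk itself carries its engine marker,
    #                        decoded by self.decompress
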
    def _chunks(self, revs, df=None, targetsize=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self.index.entry_size
        buffer = util.buffer

        l = []
        ladd = l.append

        if not self._withsparseread:
            slicedchunks = (revs,)
        else:
            slicedchunks = deltautil.slicechunk(
                self, revs, targetsize=targetsize
            )

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            # self._decompressor might be None, but will not be used in that case
            def_decomp = self._decompressor
            for rev in revschunk:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                comp_mode = self.index[rev][10]
                c = buffer(data, chunkstart - offset, chunklength)
                if comp_mode == COMP_MODE_PLAIN:
                    ladd(c)
                elif comp_mode == COMP_MODE_INLINE:
                    ladd(decomp(c))
                elif comp_mode == COMP_MODE_DEFAULT:
                    ladd(def_decomp(c))
                else:
                    msg = b'unknown compression mode %d'
                    msg %= comp_mode
                    raise error.RevlogError(msg)

        return l

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

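    # Illustrative reading of the index field (base = entry[3]):
    #
    #   base == rev           -> the revision is stored as a full snapshot,
    #                            so its delta parent is nullrev
    #   general-delta revlogs -> base *is* the delta parent and may be any
    #                            earlier revision (typically p1)
    #   legacy revlogs        -> deltas always chain against rev - 1
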
    def issnapshot(self, rev):
        """tells whether rev is a snapshot"""
        if not self._sparserevlog:
            return self.deltaparent(rev) == nullrev
        elif util.safehasattr(self.index, b'issnapshot'):
            # directly assign the method to cache the testing and access
            self.issnapshot = self.index.issnapshot
            return self.issnapshot(rev)
        if rev == nullrev:
            return True
        entry = self.index[rev]
        base = entry[3]
        if base == rev:
            return True
        if base == nullrev:
            return True
        p1 = entry[5]
        p2 = entry[6]
        if base == p1 or base == p2:
            return False
        return self.issnapshot(base)

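    # Worked example for the pure-python fallback above (illustrative
    # values; entry[3] is the delta base, entry[5]/entry[6] the parents):
    #
    #   rev 0: base == rev                -> snapshot (full text)
    #   rev 5: base == 0, parents (4, -1) -> base is neither parent, so
    #          recurse on rev 0           -> an intermediate snapshot
    #   rev 6: base == 5 == p1            -> a plain delta against its
    #          parent, not a snapshot
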
    def snapshotdepth(self, rev):
        """number of snapshots in the chain before this one"""
        if not self.issnapshot(rev):
            raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
        return len(self._deltachain(rev)[0]) - 1

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._chunk(rev2))

        return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))

    def revision(self, nodeorrev, _df=None):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        """
        return self._revisiondata(nodeorrev, _df)

    def sidedata(self, nodeorrev, _df=None):
        """a map of extra data related to the changeset but not part of the hash

        This function currently returns a dictionary. However, a more
        advanced mapping object will likely be used in the future for more
        efficient/lazy code.
        """
        # deal with <nodeorrev> argument type
        if isinstance(nodeorrev, int):
            rev = nodeorrev
        else:
            rev = self.rev(nodeorrev)
        return self._sidedata(rev)

    def _revisiondata(self, nodeorrev, _df=None, raw=False):
        # deal with <nodeorrev> argument type
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        # fast path the special `nullid` rev
        if node == self.nullid:
            return b""

        # ``rawtext`` is the text as stored inside the revlog. Might be the
        # revision or might need to be processed to retrieve the revision.
        rev, rawtext, validated = self._rawtext(node, rev, _df=_df)

        if raw and validated:
            # if we don't want to process the raw text and the raw text is
            # already cached, we can exit early.
            return rawtext
        if rev is None:
            rev = self.rev(node)
        # the revlog's flags for this revision
        # (they usually alter its state or content)
        flags = self.flags(rev)

        if validated and flags == REVIDX_DEFAULT_FLAGS:
            # no extra flags set, no flag processor runs, text = rawtext
            return rawtext

        if raw:
            validatehash = flagutil.processflagsraw(self, rawtext, flags)
            text = rawtext
        else:
            r = flagutil.processflagsread(self, rawtext, flags)
            text, validatehash = r
        if validatehash:
            self.checkhash(text, node, rev=rev)
        if not validated:
            self._revisioncache = (node, rev, rawtext)

        return text

    def _rawtext(self, node, rev, _df=None):
        """return the possibly unvalidated rawtext for a revision

        returns (rev, rawtext, validated)
        """

        # revision in the cache (could be useful to apply delta)
        cachedrev = None
        # An intermediate text to apply deltas to
        basetext = None

        # Check if we have the entry in cache
        # The cache entry looks like (node, rev, rawtext)
        if self._revisioncache:
            if self._revisioncache[0] == node:
                return (rev, self._revisioncache[2], True)
            cachedrev = self._revisioncache[1]

        if rev is None:
            rev = self.rev(node)

        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
        if stopped:
            basetext = self._revisioncache[2]

        # drop cache to save memory, the caller is expected to
        # update self._revisioncache after validating the text
        self._revisioncache = None

        targetsize = None
        rawsize = self.index[rev][2]
        if 0 <= rawsize:
            targetsize = 4 * rawsize

        bins = self._chunks(chain, df=_df, targetsize=targetsize)
        if basetext is None:
            basetext = bytes(bins[0])
            bins = bins[1:]

        rawtext = mdiff.patches(basetext, bins)
        del basetext  # let us have a chance to free memory early
        return (rev, rawtext, False)

    def _sidedata(self, rev):
        """Return the sidedata for a given revision number."""
        index_entry = self.index[rev]
        sidedata_offset = index_entry[8]
        sidedata_size = index_entry[9]

        if self._inline:
            sidedata_offset += self.index.entry_size * (1 + rev)
        if sidedata_size == 0:
            return {}

        if self._docket.sidedata_end < sidedata_offset + sidedata_size:
            filename = self._sidedatafile
            end = self._docket.sidedata_end
            offset = sidedata_offset
            length = sidedata_size
            m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
            raise error.RevlogError(m)

        comp_segment = self._segmentfile_sidedata.read_chunk(
            sidedata_offset, sidedata_size
        )

        comp = self.index[rev][11]
        if comp == COMP_MODE_PLAIN:
            segment = comp_segment
        elif comp == COMP_MODE_DEFAULT:
            segment = self._decompressor(comp_segment)
        elif comp == COMP_MODE_INLINE:
            segment = self.decompress(comp_segment)
        else:
            msg = b'unknown compression mode %d'
            msg %= comp
            raise error.RevlogError(msg)

        sidedata = sidedatautil.deserialize_sidedata(segment)
        return sidedata

    def rawdata(self, nodeorrev, _df=None):
        """return the uncompressed raw data of a given node or revision number.

        _df - an existing file handle to read from. (internal-only)
        """
        return self._revisiondata(nodeorrev, _df, raw=True)

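    # Illustrative contrast with revision() (hypothetical filelog ``fl``):
    #
    #   fl.revision(node)  # flag processors applied: the logical content
    #   fl.rawdata(node)   # the stored bytes; flags are only validated
    #
    # For revisions carrying no special flags the two return the same bytes.
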
    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return storageutil.hashrevisionsha1(text, p1, p2)

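    # A minimal sketch of the classic computation behind
    # storageutil.hashrevisionsha1 (illustrative only, assuming 20-byte
    # binary parent ids):
    #
    #   import hashlib
    #   def nodeid(text, p1, p2):
    #       a, b = sorted([p1, p2])  # order-insensitive over the parents
    #       return hashlib.sha1(a + b + text).digest()
    #
    # so a nodeid commits to the text *and* both parents.
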
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        try:
            if p1 is None and p2 is None:
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                # Clear the revision cache on hash failure. The revision cache
                # only stores the raw revision and clearing the cache does have
                # the side-effect that we won't have a cache hit when the raw
                # revision data is accessed. But this case should be rare and
                # it is extra work to teach the cache about the hash
                # verification state.
                if self._revisioncache and self._revisioncache[0] == node:
                    self._revisioncache = None

                revornode = rev
                if revornode is None:
                    revornode = templatefilters.short(hex(node))
                raise error.RevlogError(
                    _(b"integrity check failed on %s:%s")
                    % (self.display_id, pycompat.bytestr(revornode))
                )
        except error.RevlogError:
            if self._censorable and storageutil.iscensoredtext(text):
                raise error.CensoredNodeError(self.display_id, node, text)
            raise

    def _enforceinlinesize(self, tr):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        total_size = self.start(tiprev) + self.length(tiprev)
        if not self._inline or total_size < _maxinline:
            return

        troffset = tr.findoffset(self._indexfile)
        if troffset is None:
            raise error.RevlogError(
                _(b"%s not found in the transaction") % self._indexfile
            )
        trindex = None
        tr.add(self._datafile, 0)

        existing_handles = False
        if self._writinghandles is not None:
            existing_handles = True
            fp = self._writinghandles[0]
            fp.flush()
            fp.close()
            # We can't use the cached file handle after close(). So prevent
            # its usage.
            self._writinghandles = None
            self._segmentfile.writing_handle = None
            # No need to deal with sidedata writing handle as it is only
            # relevant with revlog-v2 which is never inline, not reaching
            # this code

        new_dfh = self._datafp(b'w+')
        new_dfh.truncate(0)  # drop any potentially existing data
        try:
            with self._indexfp() as read_ifh:
                for r in self:
                    new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
                    if (
                        trindex is None
                        and troffset
                        <= self.start(r) + r * self.index.entry_size
                    ):
                        trindex = r
                new_dfh.flush()

            if trindex is None:
                trindex = 0

            with self.__index_new_fp() as fp:
                self._format_flags &= ~FLAG_INLINE_DATA
                self._inline = False
                for i in self:
                    e = self.index.entry_binary(i)
                    if i == 0 and self._docket is None:
                        header = self._format_flags | self._format_version
                        header = self.index.pack_header(header)
                        e = header + e
                    fp.write(e)
                if self._docket is not None:
                    self._docket.index_end = fp.tell()

                # There is a small transactional race here. If the rename of
                # the index fails, we should remove the datafile. It is more
                # important to ensure that the data file is not truncated
                # when the index is replaced as otherwise data is lost.
                tr.replace(self._datafile, self.start(trindex))

                # the temp file replaces the real index when we exit the
                # context manager

            tr.replace(self._indexfile, trindex * self.index.entry_size)
            nodemaputil.setup_persistent_nodemap(tr, self)
            self._segmentfile = randomaccessfile.randomaccessfile(
                self.opener,
                self._datafile,
                self._chunkcachesize,
            )

            if existing_handles:
                # switched from inline to conventional; reopen the index
                ifh = self.__index_write_fp()
                self._writinghandles = (ifh, new_dfh, None)
                self._segmentfile.writing_handle = new_dfh
                new_dfh = None
                # No need to deal with sidedata writing handle as it is only
                # relevant with revlog-v2 which is never inline, not reaching
                # this code
        finally:
            if new_dfh is not None:
                new_dfh.close()

    def _nodeduplicatecallback(self, transaction, node):
        """called when trying to add a node already stored."""

    @contextlib.contextmanager
    def reading(self):
        """Context manager that keeps data and sidedata files open for reading"""
        with self._segmentfile.reading():
            with self._segmentfile_sidedata.reading():
                yield

    @contextlib.contextmanager
    def _writing(self, transaction):
        if self._trypending:
            msg = b'try to write in a `trypending` revlog: %s'
            msg %= self.display_id
            raise error.ProgrammingError(msg)
        if self._writinghandles is not None:
            yield
        else:
            ifh = dfh = sdfh = None
            try:
                r = len(self)
                # opening the data file.
                dsize = 0
                if r:
                    dsize = self.end(r - 1)
                dfh = None
                if not self._inline:
                    try:
                        dfh = self._datafp(b"r+")
                        if self._docket is None:
                            dfh.seek(0, os.SEEK_END)
                        else:
                            dfh.seek(self._docket.data_end, os.SEEK_SET)
                    except IOError as inst:
                        if inst.errno != errno.ENOENT:
                            raise
                        dfh = self._datafp(b"w+")
                    transaction.add(self._datafile, dsize)
                if self._sidedatafile is not None:
                    # revlog-v2 does not inline, help Pytype
                    assert dfh is not None
                    try:
                        sdfh = self.opener(self._sidedatafile, mode=b"r+")
                        dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
                    except IOError as inst:
                        if inst.errno != errno.ENOENT:
                            raise
                        sdfh = self.opener(self._sidedatafile, mode=b"w+")
                    transaction.add(
                        self._sidedatafile, self._docket.sidedata_end
                    )

                # opening the index file.
                isize = r * self.index.entry_size
                ifh = self.__index_write_fp()
                if self._inline:
                    transaction.add(self._indexfile, dsize + isize)
                else:
                    transaction.add(self._indexfile, isize)
                # exposing all file handles for writing.
                self._writinghandles = (ifh, dfh, sdfh)
                self._segmentfile.writing_handle = ifh if self._inline else dfh
                self._segmentfile_sidedata.writing_handle = sdfh
                yield
                if self._docket is not None:
                    self._write_docket(transaction)
            finally:
                self._writinghandles = None
                self._segmentfile.writing_handle = None
                self._segmentfile_sidedata.writing_handle = None
                if dfh is not None:
                    dfh.close()
                if sdfh is not None:
                    sdfh.close()
                # closing the index file last to avoid exposing references to
                # potentially unflushed data content.
                if ifh is not None:
                    ifh.close()

    def _write_docket(self, transaction):
        """write the current docket on disk

        Exists as a method to help the changelog implement its transaction
        logic.

        We could also imagine using the same transaction logic for all
        revlogs, since dockets are cheap."""
        self._docket.write(transaction)

    def addrevision(
        self,
        text,
        transaction,
        link,
        p1,
        p2,
        cachedelta=None,
        node=None,
        flags=REVIDX_DEFAULT_FLAGS,
        deltacomputer=None,
        sidedata=None,
    ):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
            computed by default as hash(text, p1, p2); however, subclasses
            might use a different hashing method (and override checkhash() in
            that case)
        flags - the known flags to set on the revision
        deltacomputer - an optional deltacomputer instance shared between
            multiple calls
        """
        if link == nullrev:
            raise error.RevlogError(
                _(b"attempted to add linkrev -1 to %s") % self.display_id
            )

        if sidedata is None:
            sidedata = {}
        elif sidedata and not self.hassidedata:
            raise error.ProgrammingError(
                _(b"trying to add sidedata to a revlog that does not support them")
            )

        if flags:
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = flagutil.processflagswrite(self, text, flags)

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise error.RevlogError(
                _(
                    b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
                )
                % (self.display_id, len(rawtext))
            )

        node = node or self.hash(rawtext, p1, p2)
        rev = self.index.get_rev(node)
        if rev is not None:
            return rev

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(
            rawtext,
            transaction,
            link,
            p1,
            p2,
            node,
            flags,
            cachedelta=cachedelta,
            deltacomputer=deltacomputer,
            sidedata=sidedata,
        )

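    # Editor's note: an illustrative sketch, not part of upstream revlog.py.
    # Minimal use of addrevision() above, assuming `rl` is a revlog opened for
    # writing and `tr` is an open transaction (all names hypothetical):
    #
    #   text = b'content of the new file revision'
    #   rev = rl.addrevision(text, tr, linkrev, p1_node, p2_node)
    #
    # If the node already exists, the existing revision number is returned,
    # so the call is idempotent per node.
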
    def addrawrevision(
        self,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        deltacomputer=None,
        sidedata=None,
    ):
        """add a raw revision with known flags, node and parents
        useful when reusing a revision not stored in this revlog (ex: received
        over wire, or read from an external bundle).
        """
        with self._writing(transaction):
            return self._addrevision(
                node,
                rawtext,
                transaction,
                link,
                p1,
                p2,
                flags,
                cachedelta,
                deltacomputer=deltacomputer,
                sidedata=sidedata,
            )

    def compress(self, data):
        """Generate a possibly-compressed representation of data."""
        if not data:
            return b'', data

        compressed = self._compressor.compress(data)

        if compressed:
            # The revlog compressor added the header in the returned data.
            return b'', compressed

        if data[0:1] == b'\0':
            return b'', data
        return b'u', data

    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely be at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        if t == b'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise error.RevlogError(
                    _(b'revlog decompress error: %s')
                    % stringutil.forcebytestr(e)
                )
        # '\0' is more common than 'u' so it goes first.
        elif t == b'\0':
            return data
        elif t == b'u':
            return util.buffer(data, 1)

        compressor = self._get_decompressor(t)

        return compressor.decompress(data)

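    # Editor's note: an illustrative sketch of the one-byte header convention
    # shared by compress() and decompress() above; not upstream code.
    #
    #   h, chunk = rl.compress(b'some revision data')
    #   # h == b''  -> `chunk` already starts with a compressor header
    #   #              (e.g. b'x' for zlib) or with b'\0' (stored as-is)
    #   # h == b'u' -> `chunk` is raw and the b'u' marker must be prepended
    #   stored = h + chunk
    #   assert rl.decompress(stored) == b'some revision data'
    #
    # decompress() dispatches on stored[0:1]: b'x' goes to zlib, b'\0' is
    # returned verbatim, b'u' strips the marker byte, and any other byte is
    # looked up as a registered compression engine.
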
    def _addrevision(
        self,
        node,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        flags,
        cachedelta,
        alwayscache=False,
        deltacomputer=None,
        sidedata=None,
    ):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.

        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

        if "deltacomputer" is not provided or None, a defaultdeltacomputer will
        be used.

        invariants:
        - rawtext is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        if node == self.nullid:
            raise error.RevlogError(
                _(b"%s: attempt to add null revision") % self.display_id
            )
        if (
            node == self.nodeconstants.wdirid
            or node in self.nodeconstants.wdirfilenodeids
        ):
            raise error.RevlogError(
                _(b"%s: attempt to add wdir revision") % self.display_id
            )
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)

        if self._inline:
            fh = self._writinghandles[0]
        else:
            fh = self._writinghandles[1]

        btext = [rawtext]

        curr = len(self)
        prev = curr - 1

        offset = self._get_data_offset(prev)

        if self._concurrencychecker:
            ifh, dfh, sdfh = self._writinghandles
            # XXX no checking for the sidedata file
            if self._inline:
                # offset is "as if" it were in the .d file, so we need to add on
                # the size of the entry metadata.
                self._concurrencychecker(
                    ifh, self._indexfile, offset + curr * self.index.entry_size
                )
            else:
                # Entries in the .i are a consistent size.
                self._concurrencychecker(
                    ifh, self._indexfile, curr * self.index.entry_size
                )
                self._concurrencychecker(dfh, self._datafile, offset)

        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if rawtext is None:
            # need rawtext size, before changed by flag processors, which is
            # the non-raw size. use revlog explicitly to avoid filelog's extra
            # logic that might remove metadata size.
            textlen = mdiff.patchedsize(
                revlog.size(self, cachedelta[0]), cachedelta[1]
            )
        else:
            textlen = len(rawtext)

        if deltacomputer is None:
            write_debug = None
            if self._debug_delta:
                write_debug = transaction._report
            deltacomputer = deltautil.deltacomputer(
                self, write_debug=write_debug
            )

        revinfo = revlogutils.revisioninfo(
            node,
            p1,
            p2,
            btext,
            textlen,
            cachedelta,
            flags,
        )

        deltainfo = deltacomputer.finddeltainfo(revinfo, fh)

        compression_mode = COMP_MODE_INLINE
        if self._docket is not None:
            default_comp = self._docket.default_compression_header
            r = deltautil.delta_compression(default_comp, deltainfo)
            compression_mode, deltainfo = r

        sidedata_compression_mode = COMP_MODE_INLINE
        if sidedata and self.hassidedata:
            sidedata_compression_mode = COMP_MODE_PLAIN
            serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
            sidedata_offset = self._docket.sidedata_end
            h, comp_sidedata = self.compress(serialized_sidedata)
            if (
                h != b'u'
                and comp_sidedata[0:1] != b'\0'
                and len(comp_sidedata) < len(serialized_sidedata)
            ):
                assert not h
                if (
                    comp_sidedata[0:1]
                    == self._docket.default_compression_header
                ):
                    sidedata_compression_mode = COMP_MODE_DEFAULT
                    serialized_sidedata = comp_sidedata
                else:
                    sidedata_compression_mode = COMP_MODE_INLINE
                    serialized_sidedata = comp_sidedata
        else:
            serialized_sidedata = b""
            # Don't store the offset if the sidedata is empty, that way
            # we can easily detect empty sidedata and they will be no different
            # than ones we manually add.
            sidedata_offset = 0

        rank = RANK_UNKNOWN
        if self._format_version == CHANGELOGV2:
            if (p1r, p2r) == (nullrev, nullrev):
                rank = 1
            elif p1r != nullrev and p2r == nullrev:
                rank = 1 + self.fast_rank(p1r)
            elif p1r == nullrev and p2r != nullrev:
                rank = 1 + self.fast_rank(p2r)
            else:  # merge node
                if rustdagop is not None and self.index.rust_ext_compat:
                    rank = rustdagop.rank(self.index, p1r, p2r)
                else:
                    pmin, pmax = sorted((p1r, p2r))
                    rank = 1 + self.fast_rank(pmax)
                    rank += sum(1 for _ in self.findmissingrevs([pmax], [pmin]))

        e = revlogutils.entry(
            flags=flags,
            data_offset=offset,
            data_compressed_length=deltainfo.deltalen,
            data_uncompressed_length=textlen,
            data_compression_mode=compression_mode,
            data_delta_base=deltainfo.base,
            link_rev=link,
            parent_rev_1=p1r,
            parent_rev_2=p2r,
            node_id=node,
            sidedata_offset=sidedata_offset,
            sidedata_compressed_length=len(serialized_sidedata),
            sidedata_compression_mode=sidedata_compression_mode,
            rank=rank,
        )

        self.index.append(e)
        entry = self.index.entry_binary(curr)
        if curr == 0 and self._docket is None:
            header = self._format_flags | self._format_version
            header = self.index.pack_header(header)
            entry = header + entry
        self._writeentry(
            transaction,
            entry,
            deltainfo.data,
            link,
            offset,
            serialized_sidedata,
            sidedata_offset,
        )

        rawtext = btext[0]

        if alwayscache and rawtext is None:
            rawtext = deltacomputer.buildtext(revinfo, fh)

        if type(rawtext) == bytes:  # only accept immutable objects
            self._revisioncache = (node, curr, rawtext)
        self._chainbasecache[curr] = deltainfo.chainbase
        return curr

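    # Editor's note: an illustrative summary of the CHANGELOGV2 rank rule in
    # _addrevision() above; not upstream code. The rank of a revision is the
    # size of its ancestor set, itself included:
    #
    #   rank(root)   == 1                          # both parents null
    #   rank(linear) == 1 + rank(p)                # exactly one parent
    #   rank(merge)  == 1 + rank(pmax)
    #                   + |ancestors(pmin) - ancestors(pmax)|
    #
    # For example, parents of rank 5 and 3 sharing two ancestors give a merge
    # of rank 1 + 5 + (3 - 2) = 7.
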
    def _get_data_offset(self, prev):
        """Returns the current offset in the (in-transaction) data file.

        Versions < 2 of the revlog can get this in O(1); revlog v2 needs a
        docket file to store that information: since sidedata can be rewritten
        to the end of the data file within a transaction, you can have cases
        where, for example, rev `n` does not have sidedata while rev `n - 1`
        does, leading to `n - 1`'s sidedata being written after `n`'s data.

        TODO cache this in a docket file before getting out of experimental."""
        if self._docket is None:
            return self.end(prev)
        else:
            return self._docket.data_end

    def _writeentry(
        self, transaction, entry, data, link, offset, sidedata, sidedata_offset
    ):
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)
        ifh, dfh, sdfh = self._writinghandles
        if self._docket is None:
            ifh.seek(0, os.SEEK_END)
        else:
            ifh.seek(self._docket.index_end, os.SEEK_SET)
        if dfh:
            if self._docket is None:
                dfh.seek(0, os.SEEK_END)
            else:
                dfh.seek(self._docket.data_end, os.SEEK_SET)
        if sdfh:
            sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self._datafile, offset)
            if self._sidedatafile:
                transaction.add(self._sidedatafile, sidedata_offset)
            transaction.add(self._indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            if sidedata:
                sdfh.write(sidedata)
            ifh.write(entry)
        else:
            offset += curr * self.index.entry_size
            transaction.add(self._indexfile, offset)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            assert not sidedata
            self._enforceinlinesize(transaction)
        if self._docket is not None:
            # revlog-v2 always has 3 writing handles, help Pytype
            wh1 = self._writinghandles[0]
            wh2 = self._writinghandles[1]
            wh3 = self._writinghandles[2]
            assert wh1 is not None
            assert wh2 is not None
            assert wh3 is not None
            self._docket.index_end = wh1.tell()
            self._docket.data_end = wh2.tell()
            self._docket.sidedata_end = wh3.tell()

        nodemaputil.setup_persistent_nodemap(transaction, self)

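    # Editor's note: an illustrative sketch of the on-disk layouts handled by
    # _writeentry() above; not upstream code, file names hypothetical.
    #
    #   inline (small revlogs), single file foo.i:
    #       [index entry 0][data 0][index entry 1][data 1]...
    #   split (non-inline, or revlog-v2):
    #       foo.i: fixed-size index entries only
    #       foo.d: variable-length, possibly compressed, delta data
    #       plus a separate sidedata file for revlog-v2
    #
    # This is why the inline branch offsets the index journal entry by
    # `curr * self.index.entry_size` before writing.
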
    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        alwayscache=False,
        addrevisioncb=None,
        duplicaterevisioncb=None,
    ):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """

        if self._adding_group:
            raise error.ProgrammingError(b'cannot nest addgroup() calls')

        self._adding_group = True
        empty = True
        try:
            with self._writing(transaction):
                write_debug = None
                if self._debug_delta:
                    write_debug = transaction._report
                deltacomputer = deltautil.deltacomputer(
                    self,
                    write_debug=write_debug,
                )
                # loop through our set of deltas
                for data in deltas:
                    (
                        node,
                        p1,
                        p2,
                        linknode,
                        deltabase,
                        delta,
                        flags,
                        sidedata,
                    ) = data
                    link = linkmapper(linknode)
                    flags = flags or REVIDX_DEFAULT_FLAGS

                    rev = self.index.get_rev(node)
                    if rev is not None:
                        # this can happen if two branches make the same change
                        self._nodeduplicatecallback(transaction, rev)
                        if duplicaterevisioncb:
                            duplicaterevisioncb(self, rev)
                        empty = False
                        continue

                    for p in (p1, p2):
                        if not self.index.has_node(p):
                            raise error.LookupError(
                                p, self.radix, _(b'unknown parent')
                            )

                    if not self.index.has_node(deltabase):
                        raise error.LookupError(
                            deltabase, self.display_id, _(b'unknown delta base')
                        )

                    baserev = self.rev(deltabase)

                    if baserev != nullrev and self.iscensored(baserev):
                        # if base is censored, delta must be full replacement in a
                        # single patch operation
                        hlen = struct.calcsize(b">lll")
                        oldlen = self.rawsize(baserev)
                        newlen = len(delta) - hlen
                        if delta[:hlen] != mdiff.replacediffheader(
                            oldlen, newlen
                        ):
                            raise error.CensoredBaseError(
                                self.display_id, self.node(baserev)
                            )

                    if not flags and self._peek_iscensored(baserev, delta):
                        flags |= REVIDX_ISCENSORED

                    # We assume consumers of addrevisioncb will want to retrieve
                    # the added revision, which will require a call to
                    # revision(). revision() will fast path if there is a cache
                    # hit. So, we tell _addrevision() to always cache in this case.
                    # We're only using addgroup() in the context of changegroup
                    # generation so the revision data can always be handled as raw
                    # by the flagprocessor.
                    rev = self._addrevision(
                        node,
                        None,
                        transaction,
                        link,
                        p1,
                        p2,
                        flags,
                        (baserev, delta),
                        alwayscache=alwayscache,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

                    if addrevisioncb:
                        addrevisioncb(self, rev)
                    empty = False
        finally:
            self._adding_group = False
        return not empty

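    # Editor's note: an illustrative sketch, not upstream code. Each element
    # of `deltas` consumed by addgroup() above is an 8-tuple; a hypothetical
    # producer could look like:
    #
    #   def gen_deltas(entries):
    #       for e in entries:
    #           yield (
    #               e.node,       # node being added
    #               e.p1, e.p2,   # parent nodes, which must already exist
    #               e.linknode,   # changelog node this revision belongs to
    #               e.deltabase,  # node the delta applies against
    #               e.delta,      # binary patch in mdiff format
    #               e.flags,      # REVIDX_* flags, or 0 for the default
    #               e.sidedata,   # sidedata dict, or None
    #           )
    #
    #   added_any = rl.addgroup(gen_deltas(entries), linkmapper, tr)
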
    def iscensored(self, rev):
        """Check if a file revision is censored."""
        if not self._censorable:
            return False

        return self.flags(rev) & REVIDX_ISCENSORED

    def _peek_iscensored(self, baserev, delta):
        """Quickly check if a delta produces a censored revision."""
        if not self._censorable:
            return False

        return storageutil.deltaiscensored(delta, baserev, self.rawsize)

    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        return storageutil.resolvestripinfo(
            minlink,
            len(self) - 1,
            self.headrevs(),
            self.linkrev,
            self.parentrevs,
        )

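    # Editor's note: an illustrative example, not upstream code, and relying
    # on the walk implemented in storageutil.resolvestripinfo. Assume four
    # revisions in storage order 0..3 with linkrevs [0, 4, 2, 3], where rev 1
    # and rev 3 are heads:
    #
    #   rl.getstrippoint(3)  ->  (1, {2})
    #
    # Truncation must start at rev 1 (the first rev whose removal covers all
    # linkrevs >= 3), and rev 2, whose linkrev 2 is otherwise fine, is
    # reported as broken so the caller can save and re-add it.
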
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        data_end = self.start(rev)
        if not self._inline:
            transaction.add(self._datafile, data_end)
            end = rev * self.index.entry_size
        else:
            end = data_end + (rev * self.index.entry_size)

        if self._sidedatafile:
            sidedata_end = self.sidedata_cut_off(rev)
            transaction.add(self._sidedatafile, sidedata_end)

        transaction.add(self._indexfile, end)
        if self._docket is not None:
            # XXX we could leverage the docket while stripping. However it is
            # not powerful enough at the time of this comment
            self._docket.index_end = end
            self._docket.data_end = data_end
            self._docket.sidedata_end = sidedata_end
            self._docket.write(transaction, stripping=True)

        # then reset internal state in memory to forget those revisions
        self._revisioncache = None
        self._chaininfocache = util.lrucachedict(500)
        self._segmentfile.clear_cache()
        self._segmentfile_sidedata.clear_cache()

        del self.index[rev:-1]

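    # Editor's note: an illustrative check of the truncation offsets computed
    # by strip() above; not upstream code, numbers hypothetical. Stripping at
    # rev == 10 with entry_size == 64 and start(10) == 4096:
    #
    #   non-inline: .d truncated to 4096, .i truncated to 10 * 64 == 640
    #   inline:     .i truncated to 4096 + 10 * 64 == 4736, since index
    #               entries and data are interleaved in a single file
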
    def checksize(self):
        """Check size of index and data files

        return a (dd, di) tuple.
        - dd: extra bytes for the "data" file
        - di: extra bytes for the "index" file

        A healthy revlog will return (0, 0).
        """
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, io.SEEK_END)
                actual = f.tell()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self._indexfile)
            f.seek(0, io.SEEK_END)
            actual = f.tell()
            f.close()
            s = self.index.entry_size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

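    # Editor's note: an illustrative reading of checksize() above; not
    # upstream code.
    #
    #   dd, di = rl.checksize()
    #   # (0, 0)  -> index and data sizes are consistent
    #   # dd > 0  -> trailing bytes in the data file, e.g. left over from an
    #   #           interrupted write
    #   # di != 0 -> the index size is not a whole number of entries, or an
    #   #           inline file disagrees with the lengths its entries claim
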
    def files(self):
        res = [self._indexfile]
        if self._docket_file is None:
            if not self._inline:
                res.append(self._datafile)
        else:
            res.append(self._docket_file)
            res.extend(self._docket.old_index_filepaths(include_empty=False))
            if self._docket.data_end:
                res.append(self._datafile)
            res.extend(self._docket.old_data_filepaths(include_empty=False))
            if self._docket.sidedata_end:
                res.append(self._sidedatafile)
            res.extend(self._docket.old_sidedata_filepaths(include_empty=False))
        return res

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
        sidedata_helpers=None,
    ):
        if nodesorder not in (b'nodes', b'storage', b'linear', None):
            raise error.ProgrammingError(
                b'unhandled value for nodesorder: %s' % nodesorder
            )

        if nodesorder is None and not self._generaldelta:
            nodesorder = b'storage'

        if (
            not self._storedeltachains
            and deltamode != repository.CG_DELTAMODE_PREV
        ):
            deltamode = repository.CG_DELTAMODE_FULL

        return storageutil.emitrevisions(
            self,
            nodes,
            nodesorder,
            revlogrevisiondelta,
            deltaparentfn=self.deltaparent,
            candeltafn=self.candelta,
            rawsizefn=self.rawsize,
            revdifffn=self.revdiff,
            flagsfn=self.flags,
            deltamode=deltamode,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            sidedata_helpers=sidedata_helpers,
        )

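    # Editor's note: an illustrative summary of the `nodesorder` values
    # accepted by emitrevisions() above; not upstream documentation.
    #
    #   None        -> let the storage decide; forced to b'storage' for
    #                  non-generaldelta revlogs (see above)
    #   b'nodes'    -> emit in exactly the order of the `nodes` argument
    #   b'storage'  -> emit in revision (storage) order
    #   b'linear'   -> emit in a DAG-linearized order
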
    DELTAREUSEALWAYS = b'always'
    DELTAREUSESAMEREVS = b'samerevs'
    DELTAREUSENEVER = b'never'

    DELTAREUSEFULLADD = b'fulladd'

    DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}

    def clone(
        self,
        tr,
        destrevlog,
        addrevisioncb=None,
        deltareuse=DELTAREUSESAMEREVS,
        forcedeltabothparents=None,
        sidedata_helpers=None,
    ):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
           Deltas will always be reused (if possible), even if the destination
           revlog would not select the same revisions for the delta. This is
           the fastest mode of operation.
        DELTAREUSESAMEREVS
           Deltas will be reused if the destination revlog would pick the same
           revisions for the delta. This mode strikes a balance between speed
           and optimization.
        DELTAREUSENEVER
           Deltas will never be reused. This is the slowest mode of execution.
           This mode can be used to recompute deltas (e.g. if the diff/delta
           algorithm changes).
        DELTAREUSEFULLADD
           Revisions will be re-added as if they were new content. This is
           slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
           e.g. large file detection and handling.

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``forcedeltabothparents``
        argument controls whether to force compute deltas against both parents
        for merges. If ``None``, the destination revlog's current setting is
        kept.

        See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
        `sidedata_helpers`.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(
                _(b'value for deltareuse invalid: %s') % deltareuse
            )

        if len(destrevlog):
            raise ValueError(_(b'destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_(b'source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_(b'destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase control whether to reuse a cached delta,
        # if possible.
        oldlazydelta = destrevlog._lazydelta
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            self._clone(
                tr,
                destrevlog,
                addrevisioncb,
                deltareuse,
                forcedeltabothparents,
                sidedata_helpers,
            )

        finally:
            destrevlog._lazydelta = oldlazydelta
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd

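    # Editor's note: an illustrative sketch, not upstream code. A typical use
    # of clone() above is a format upgrade that recomputes every delta
    # (`src`, `dst` and `tr` are hypothetical):
    #
    #   src.clone(tr, dst, deltareuse=src.DELTAREUSENEVER)
    #
    # DELTAREUSEALWAYS is the fast path for plain copies, while
    # DELTAREUSEFULLADD routes every revision through the full addrevision()
    # pipeline.
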
    def _clone(
        self,
        tr,
        destrevlog,
        addrevisioncb,
        deltareuse,
        forcedeltabothparents,
        sidedata_helpers,
    ):
        """perform the core duty of `revlog.clone` after parameter processing"""
        write_debug = None
        if self._debug_delta:
            write_debug = tr._report
        deltacomputer = deltautil.deltacomputer(
            destrevlog,
            write_debug=write_debug,
        )
        index = self.index
        for rev in self:
            entry = index[rev]

            # Some classes override linkrev to take filtered revs into
            # account. Use raw entry from index.
            flags = entry[0] & 0xFFFF
            linkrev = entry[4]
            p1 = index[entry[5]][7]
            p2 = index[entry[6]][7]
            node = entry[7]

            # (Possibly) reuse the delta from the revlog if allowed and
            # the revlog chunk is a delta.
            cachedelta = None
            rawtext = None
            if deltareuse == self.DELTAREUSEFULLADD:
                text = self._revisiondata(rev)
                sidedata = self.sidedata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                destrevlog.addrevision(
                    text,
                    tr,
                    linkrev,
                    p1,
                    p2,
                    cachedelta=cachedelta,
                    node=node,
                    flags=flags,
                    deltacomputer=deltacomputer,
                    sidedata=sidedata,
                )
            else:
                if destrevlog._lazydelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                sidedata = None
                if not cachedelta:
                    rawtext = self._revisiondata(rev)
                    sidedata = self.sidedata(rev)
                if sidedata is None:
                    sidedata = self.sidedata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                with destrevlog._writing(tr):
                    destrevlog._addrevision(
                        node,
                        rawtext,
                        tr,
                        linkrev,
                        p1,
                        p2,
                        flags,
                        cachedelta,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

            if addrevisioncb:
                addrevisioncb(self, rev, node)

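    # Editor's note: an illustrative sketch, not upstream code. The
    # `write_debug` wiring above (mirrored in _addrevision() and addgroup())
    # is what the `debug.revlog.debug-delta` config option introduced by this
    # changeset enables: when `self._debug_delta` is set, each delta search
    # reports what it found through `tr._report`, the transaction's output
    # callback. Presumably configured as:
    #
    #   [debug]
    #   revlog.debug-delta = yes
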
    def censorrevision(self, tr, censornode, tombstone=b''):
        if self._format_version == REVLOGV0:
            raise error.RevlogError(
                _(b'cannot censor with version %d revlogs')
                % self._format_version
            )
        elif self._format_version == REVLOGV1:
            rewrite.v1_censor(self, tr, censornode, tombstone)
        else:
            rewrite.v2_censor(self, tr, censornode, tombstone)

3104 def verifyintegrity(self, state):
3124 def verifyintegrity(self, state):
3105 """Verifies the integrity of the revlog.
3125 """Verifies the integrity of the revlog.
3106
3126
3107 Yields ``revlogproblem`` instances describing problems that are
3127 Yields ``revlogproblem`` instances describing problems that are
3108 found.
3128 found.
3109 """
3129 """
3110 dd, di = self.checksize()
3130 dd, di = self.checksize()
3111 if dd:
3131 if dd:
3112 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3132 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3113 if di:
3133 if di:
3114 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3134 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3115
3135
3116 version = self._format_version
3136 version = self._format_version
3117
3137
3118 # The verifier tells us what version revlog we should be.
3138 # The verifier tells us what version revlog we should be.
3119 if version != state[b'expectedversion']:
3139 if version != state[b'expectedversion']:
3120 yield revlogproblem(
3140 yield revlogproblem(
3121 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3141 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3122 % (self.display_id, version, state[b'expectedversion'])
3142 % (self.display_id, version, state[b'expectedversion'])
3123 )
3143 )
3124
3144
3125 state[b'skipread'] = set()
3145 state[b'skipread'] = set()
3126 state[b'safe_renamed'] = set()
3146 state[b'safe_renamed'] = set()
3127
3147
        for rev in self:
            node = self.node(rev)

            # Verify contents. 4 cases to care about:
            #
            #   common: the most common case
            #   rename: with a rename
            #   meta:   file content starts with b'\1\n', the metadata
            #           header defined in filelog.py, but without a rename
            #   ext:    content stored externally
            #
            # More formally, their differences are shown below:
            #
            #                        | common | rename | meta  | ext
            # -------------------------------------------------------
            #  flags()               | 0      | 0      | 0     | not 0
            #  renamed()             | False  | True   | False | ?
            #  rawtext[0:2]=='\1\n'  | False  | True   | True  | ?
            #
            # "rawtext" means the raw text stored in revlog data, which
            # could be retrieved by "rawdata(rev)". "text"
            # mentioned below is "revision(rev)".
            #
            # There are 3 different lengths stored physically:
            #  1. L1: rawsize, stored in revlog index
            #  2. L2: len(rawtext), stored in revlog data
            #  3. L3: len(text), stored in revlog data if flags==0, or
            #     possibly somewhere else if flags!=0
            #
            # L1 should be equal to L2. L3 could be different from them.
            # "text" may or may not affect commit hash depending on flag
            # processors (see flagutil.addflagprocessor).
            #
            #               | common | rename | meta  | ext
            # -------------------------------------------------
            #  rawsize()    | L1     | L1     | L1    | L1
            #  size()       | L1     | L2-LM  | L1(*) | L1 (?)
            #  len(rawtext) | L2     | L2     | L2    | L2
            #  len(text)    | L2     | L2     | L2    | L3
            #  len(read())  | L2     | L2-LM  | L2-LM | L3 (?)
            #
            # LM:  length of metadata, depending on rawtext
            # (*): not ideal, see comment in filelog.size
            # (?): could be "- len(meta)" if the resolved content has
            #      rename metadata
            #
            # Checks needed to be done:
            #  1. length check: L1 == L2, in all cases.
            #  2. hash check: depending on flag processor, we may need to
            #     use either "text" (external), or "rawtext" (in revlog).

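            # Worked example (illustrative note added in this write-up, not
            # part of the upstream source): a renamed file stores
            #   rawtext == b'\x01\ncopy: a\ncopyrev: <40 hex chars>\n\x01\n' + text
            # The metadata header (both b'\x01\n' markers plus the
            # copy/copyrev lines) accounts for LM, so len(read()) == L2 - LM
            # while len(rawtext) == L2. The length check below only compares
            # L1 against L2, so rename metadata never affects it.
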
            try:
                skipflags = state.get(b'skipflags', 0)
                if skipflags:
                    skipflags &= self.flags(rev)

                _verify_revision(self, skipflags, state, node)

                l1 = self.rawsize(rev)
                l2 = len(self.rawdata(node))

                if l1 != l2:
                    yield revlogproblem(
                        error=_(b'unpacked size is %d, %d expected') % (l2, l1),
                        node=node,
                    )

            except error.CensoredNodeError:
                if state[b'erroroncensored']:
                    yield revlogproblem(
                        error=_(b'censored file data'), node=node
                    )
                    state[b'skipread'].add(node)
            except Exception as e:
                yield revlogproblem(
                    error=_(b'unpacking %s: %s')
                    % (short(node), stringutil.forcebytestr(e)),
                    node=node,
                )
                state[b'skipread'].add(node)

    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        d = {}

        if exclusivefiles:
            d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
            if not self._inline:
                d[b'exclusivefiles'].append((self.opener, self._datafile))

        if sharedfiles:
            d[b'sharedfiles'] = []

        if revisionscount:
            d[b'revisionscount'] = len(self)

        if trackedsize:
            d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))

        if storedsize:
            d[b'storedsize'] = sum(
                self.opener.stat(path).st_size for path in self.files()
            )

        return d

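    # Usage sketch (illustrative; ``rl`` stands for some revlog instance and
    # is an assumption, not something defined here): callers request only
    # the fields they need, e.g.
    #
    #     info = rl.storageinfo(revisionscount=True, storedsize=True)
    #     info[b'revisionscount']  # number of revisions in this revlog
    #     info[b'storedsize']      # on-disk bytes across the revlog's files
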
    def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
        if not self.hassidedata:
            return
        # revlog formats with sidedata support do not support inline
        assert not self._inline
        if not helpers[1] and not helpers[2]:
            # Nothing to generate or remove
            return

        new_entries = []
        # append the new sidedata
        with self._writing(transaction):
            ifh, dfh, sdfh = self._writinghandles
            dfh.seek(self._docket.sidedata_end, os.SEEK_SET)

            current_offset = sdfh.tell()
            for rev in range(startrev, endrev + 1):
                entry = self.index[rev]
                new_sidedata, flags = sidedatautil.run_sidedata_helpers(
                    store=self,
                    sidedata_helpers=helpers,
                    sidedata={},
                    rev=rev,
                )

                serialized_sidedata = sidedatautil.serialize_sidedata(
                    new_sidedata
                )

                sidedata_compression_mode = COMP_MODE_INLINE
                if serialized_sidedata and self.hassidedata:
                    sidedata_compression_mode = COMP_MODE_PLAIN
                    h, comp_sidedata = self.compress(serialized_sidedata)
                    if (
                        h != b'u'
                        and comp_sidedata[0] != b'\0'
                        and len(comp_sidedata) < len(serialized_sidedata)
                    ):
                        assert not h
                        if (
                            comp_sidedata[0]
                            == self._docket.default_compression_header
                        ):
                            sidedata_compression_mode = COMP_MODE_DEFAULT
                            serialized_sidedata = comp_sidedata
                        else:
                            sidedata_compression_mode = COMP_MODE_INLINE
                            serialized_sidedata = comp_sidedata
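
                # Note (explanatory comment, not in the upstream source):
                # the three possible outcomes above are
                #   COMP_MODE_PLAIN   - sidedata kept as-is, uncompressed
                #   COMP_MODE_DEFAULT - compressed, the header implied by
                #                       the docket's default compression
                #   COMP_MODE_INLINE  - the stored chunk carries its own
                #                       compression header (also the case
                #                       for empty sidedata)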
                if entry[8] != 0 or entry[9] != 0:
                    # entry[8] and entry[9] hold the current sidedata offset
                    # and size; rewriting entries that already have sidedata
                    # is not supported yet, because it introduces garbage
                    # data in the revlog.
                    msg = b"rewriting existing sidedata is not supported yet"
                    raise error.Abort(msg)

                # Apply (potential) flags to add and to remove after running
                # the sidedata helpers
                new_offset_flags = entry[0] | flags[0] & ~flags[1]
                entry_update = (
                    current_offset,
                    len(serialized_sidedata),
                    new_offset_flags,
                    sidedata_compression_mode,
                )

                # the sidedata computation might have moved the file cursors
                # around
                sdfh.seek(current_offset, os.SEEK_SET)
                sdfh.write(serialized_sidedata)
                new_entries.append(entry_update)
                current_offset += len(serialized_sidedata)
            self._docket.sidedata_end = sdfh.tell()

            # rewrite the new index entries
            ifh.seek(startrev * self.index.entry_size)
            for i, e in enumerate(new_entries):
                rev = startrev + i
                self.index.replace_sidedata_info(rev, *e)
                packed = self.index.entry_binary(rev)
                if rev == 0 and self._docket is None:
                    header = self._format_flags | self._format_version
                    header = self.index.pack_header(header)
                    packed = header + packed
                ifh.write(packed)
@@ -1,1040 +1,1065 b''
Setting up test

  $ hg init test
  $ cd test
  $ echo 0 > afile
  $ hg add afile
  $ hg commit -m "0.0"
  $ echo 1 >> afile
  $ hg commit -m "0.1"
  $ echo 2 >> afile
  $ hg commit -m "0.2"
  $ echo 3 >> afile
  $ hg commit -m "0.3"
  $ hg update -C 0
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ echo 1 >> afile
  $ hg commit -m "1.1"
  created new head
  $ echo 2 >> afile
  $ hg commit -m "1.2"
  $ echo "a line" > fred
  $ echo 3 >> afile
  $ hg add fred
  $ hg commit -m "1.3"
  $ hg mv afile adifferentfile
  $ hg commit -m "1.3m"
  $ hg update -C 3
  1 files updated, 0 files merged, 2 files removed, 0 files unresolved
  $ hg mv afile anotherfile
  $ hg commit -m "0.3m"
  $ hg verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  checked 9 changesets with 7 changes to 4 files
  $ cd ..
  $ hg init empty

Bundle and phase

  $ hg -R test phase --force --secret 0
  $ hg -R test bundle phase.hg empty
  searching for changes
  no changes found (ignored 9 secret changesets)
  [1]
  $ hg -R test phase --draft -r 'head()'

Bundle --all

  $ hg -R test bundle --all all.hg
  9 changesets found

Bundle test to full.hg

  $ hg -R test bundle full.hg empty
  searching for changes
  9 changesets found

Unbundle full.hg in test

  $ hg -R test unbundle full.hg
  adding changesets
  adding manifests
  adding file changes
  added 0 changesets with 0 changes to 4 files
  (run 'hg update' to get a working copy)

Verify empty

  $ hg -R empty heads
  [1]
  $ hg -R empty verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  checked 0 changesets with 0 changes to 0 files

#if repobundlerepo

Pull full.hg into test (using --cwd)

  $ hg --cwd test pull ../full.hg
  pulling from ../full.hg
  searching for changes
  no changes found

Verify that there are no leaked temporary files after pull (issue2797)

  $ ls test/.hg | grep .hg10un
  [1]

Pull full.hg into empty (using --cwd)

  $ hg --cwd empty pull ../full.hg
  pulling from ../full.hg
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 9 changesets with 7 changes to 4 files (+1 heads)
  new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
  (run 'hg heads' to see heads, 'hg merge' to merge)

Rollback empty

  $ hg -R empty rollback
  repository tip rolled back to revision -1 (undo pull)

Pull full.hg into empty again (using --cwd)

  $ hg --cwd empty pull ../full.hg
  pulling from ../full.hg
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 9 changesets with 7 changes to 4 files (+1 heads)
  new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
  (run 'hg heads' to see heads, 'hg merge' to merge)

Pull full.hg into test (using -R)

  $ hg -R test pull full.hg
  pulling from full.hg
  searching for changes
  no changes found

Pull full.hg into empty (using -R)

  $ hg -R empty pull full.hg
  pulling from full.hg
  searching for changes
  no changes found

Rollback empty

  $ hg -R empty rollback
  repository tip rolled back to revision -1 (undo pull)

Pull full.hg into empty again (using -R)

  $ hg -R empty pull full.hg
  pulling from full.hg
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 9 changesets with 7 changes to 4 files (+1 heads)
  new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
  (run 'hg heads' to see heads, 'hg merge' to merge)

Log -R full.hg in fresh empty

  $ rm -r empty
  $ hg init empty
  $ cd empty
  $ hg -R bundle://../full.hg log
  changeset:   8:aa35859c02ea
  tag:         tip
  parent:      3:eebf5a27f8ca
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.3m

  changeset:   7:a6a34bfa0076
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.3m

  changeset:   6:7373c1169842
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.3

  changeset:   5:1bb50a9436a7
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.2

  changeset:   4:095197eb4973
  parent:      0:f9ee2f85a263
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.1

  changeset:   3:eebf5a27f8ca
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.3

  changeset:   2:e38ba6f5b7e0
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.2

  changeset:   1:34c2bf6b0626
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.1

  changeset:   0:f9ee2f85a263
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.0

Make sure bundlerepo doesn't leak tempfiles (issue2491)

  $ ls .hg
  00changelog.i
  cache
  requires
  store
  wcache

Pull ../full.hg into empty (with hook)

  $ cat >> .hg/hgrc <<EOF
  > [hooks]
  > changegroup = sh -c "printenv.py --line changegroup"
  > EOF

doesn't work (yet?)
NOTE: msys is mangling the URL below

hg -R bundle://../full.hg verify

  $ hg pull bundle://../full.hg
  pulling from bundle:../full.hg
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 9 changesets with 7 changes to 4 files (+1 heads)
  new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
  changegroup hook: HG_HOOKNAME=changegroup
  HG_HOOKTYPE=changegroup
  HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735
  HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf
  HG_SOURCE=pull
  HG_TXNID=TXN:$ID$
  HG_TXNNAME=pull
  bundle:../full.hg (no-msys !)
  bundle;../full.hg (msys !)
  HG_URL=bundle:../full.hg (no-msys !)
  HG_URL=bundle;../full.hg (msys !)

  (run 'hg heads' to see heads, 'hg merge' to merge)

Rollback empty

  $ hg rollback
  repository tip rolled back to revision -1 (undo pull)
  $ cd ..

Log -R bundle:empty+full.hg

  $ hg -R bundle:empty+full.hg log --template="{rev} "; echo ""
  8 7 6 5 4 3 2 1 0

Pull full.hg into empty again (using -R; with hook)

  $ hg -R empty pull full.hg
  pulling from full.hg
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 9 changesets with 7 changes to 4 files (+1 heads)
  new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
  changegroup hook: HG_HOOKNAME=changegroup
  HG_HOOKTYPE=changegroup
  HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735
  HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf
  HG_SOURCE=pull
  HG_TXNID=TXN:$ID$
  HG_TXNNAME=pull
  bundle:empty+full.hg
  HG_URL=bundle:empty+full.hg

  (run 'hg heads' to see heads, 'hg merge' to merge)

#endif

Cannot produce streaming clone bundles with "hg bundle"

  $ hg -R test bundle -t packed1 packed.hg
  abort: packed bundles cannot be produced by "hg bundle"
  (use 'hg debugcreatestreamclonebundle')
  [10]

packed1 is produced properly


#if reporevlogstore rust

  $ hg -R test debugcreatestreamclonebundle packed.hg
  writing 2665 bytes for 6 files
  bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog

  $ f -B 64 --size --sha1 --hexdump packed.hg
  packed.hg: size=2865, sha1=353d10311f4befa195d9a1ca4b8e26518115c702
  0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
  0010: 00 00 00 00 0a 69 00 3b 67 65 6e 65 72 61 6c 64 |.....i.;generald|
  0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 2d 63 6f 6d 70 |elta,revlog-comp|
  0030: 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c 72 65 76 |ression-zstd,rev|
  $ hg debugbundle --spec packed.hg
  none-packed1;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog
#endif

#if reporevlogstore no-rust zstd

  $ hg -R test debugcreatestreamclonebundle packed.hg
  writing 2665 bytes for 6 files
  bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog

  $ f -B 64 --size --sha1 --hexdump packed.hg
  packed.hg: size=2865, sha1=353d10311f4befa195d9a1ca4b8e26518115c702
  0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
  0010: 00 00 00 00 0a 69 00 3b 67 65 6e 65 72 61 6c 64 |.....i.;generald|
  0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 2d 63 6f 6d 70 |elta,revlog-comp|
  0030: 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c 72 65 76 |ression-zstd,rev|
  $ hg debugbundle --spec packed.hg
  none-packed1;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog
#endif

#if reporevlogstore no-rust no-zstd

  $ hg -R test debugcreatestreamclonebundle packed.hg
  writing 2664 bytes for 6 files
  bundle requirements: generaldelta, revlogv1, sparserevlog

  $ f -B 64 --size --sha1 --hexdump packed.hg
  packed.hg: size=2840, sha1=12bf3eee3eb8a04c503ce2d29b48f0135c7edff5
  0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
  0010: 00 00 00 00 0a 68 00 23 67 65 6e 65 72 61 6c 64 |.....h.#generald|
  0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 2c 73 70 |elta,revlogv1,sp|
  0030: 61 72 73 65 72 65 76 6c 6f 67 00 64 61 74 61 2f |arserevlog.data/|
  $ hg debugbundle --spec packed.hg
  none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog
#endif
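
The packed1 header can be decoded straight from the hexdumps above. A
minimal parsing sketch (illustrative only, not part of the test suite; the
field layout is inferred from the dumps: a 6-byte magic, two big-endian
64-bit counts, then a 16-bit length followed by the NUL-terminated
requirements string):

  import struct

  with open('packed.hg', 'rb') as fh:
      magic = fh.read(6)  # b'HGS1' + b'UN' ("uncompressed") in the dumps above
      filecount, bytecount = struct.unpack('>QQ', fh.read(16))
      (reqlen,) = struct.unpack('>H', fh.read(2))
      requirements = fh.read(reqlen).rstrip(b'\x00').split(b',')
      # e.g. filecount == 6 and bytecount == 2665 for the zstd dumps above,
      # with requirements matching the "bundle requirements:" line

(The same requirements reappear percent-encoded in the "hg debugbundle
--spec" output: %3D is "=" and %2C is ",".)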

#if reporevlogstore

generaldelta requirement is not listed in stream clone bundles unless used

  $ hg --config format.usegeneraldelta=false init testnongd
  $ cd testnongd
  $ touch foo
  $ hg -q commit -A -m initial
  $ cd ..

#endif

#if reporevlogstore rust

  $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
  writing 301 bytes for 3 files
  bundle requirements: revlog-compression-zstd, revlogv1

  $ f -B 64 --size --sha1 --hexdump packednongd.hg
  packednongd.hg: size=407, sha1=0b8714422b785ba8eb98c916b41ffd5fb994c9b5
  0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
  0010: 00 00 00 00 01 2d 00 21 72 65 76 6c 6f 67 2d 63 |.....-.!revlog-c|
  0020: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c |ompression-zstd,|
  0030: 72 65 76 6c 6f 67 76 31 00 64 61 74 61 2f 66 6f |revlogv1.data/fo|

  $ hg debugbundle --spec packednongd.hg
  none-packed1;requirements%3Drevlog-compression-zstd%2Crevlogv1

#endif

#if reporevlogstore no-rust zstd

  $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
  writing 301 bytes for 3 files
  bundle requirements: revlog-compression-zstd, revlogv1

  $ f -B 64 --size --sha1 --hexdump packednongd.hg
  packednongd.hg: size=407, sha1=0b8714422b785ba8eb98c916b41ffd5fb994c9b5
  0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
  0010: 00 00 00 00 01 2d 00 21 72 65 76 6c 6f 67 2d 63 |.....-.!revlog-c|
  0020: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c |ompression-zstd,|
  0030: 72 65 76 6c 6f 67 76 31 00 64 61 74 61 2f 66 6f |revlogv1.data/fo|

  $ hg debugbundle --spec packednongd.hg
  none-packed1;requirements%3Drevlog-compression-zstd%2Crevlogv1


#endif

#if reporevlogstore no-rust no-zstd

  $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
  writing 301 bytes for 3 files
  bundle requirements: revlogv1

  $ f -B 64 --size --sha1 --hexdump packednongd.hg
  packednongd.hg: size=383, sha1=1d9c230238edd5d38907100b729ba72b1831fe6f
  0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
  0010: 00 00 00 00 01 2d 00 09 72 65 76 6c 6f 67 76 31 |.....-..revlogv1|
  0020: 00 64 61 74 61 2f 66 6f 6f 2e 69 00 36 34 0a 00 |.data/foo.i.64..|
  0030: 01 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|

  $ hg debugbundle --spec packednongd.hg
  none-packed1;requirements%3Drevlogv1


#endif

#if reporevlogstore

Warning emitted when packed bundles contain secret changesets

  $ hg init testsecret
  $ cd testsecret
  $ touch foo
  $ hg -q commit -A -m initial
  $ hg phase --force --secret -r .
  $ cd ..

#endif

#if reporevlogstore rust

  $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
  (warning: stream clone bundle will contain secret revisions)
  writing 301 bytes for 3 files
  bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog

#endif

#if reporevlogstore no-rust zstd

  $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
  (warning: stream clone bundle will contain secret revisions)
  writing 301 bytes for 3 files
  bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog

#endif

#if reporevlogstore no-rust no-zstd

  $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
  (warning: stream clone bundle will contain secret revisions)
  writing 301 bytes for 3 files
  bundle requirements: generaldelta, revlogv1, sparserevlog

#endif

#if reporevlogstore

Unpacking packed1 bundles with "hg unbundle" isn't allowed

  $ hg init packed
  $ hg -R packed unbundle packed.hg
  abort: packed bundles cannot be applied with "hg unbundle"
  (use "hg debugapplystreamclonebundle")
  [10]

packed1 can be consumed from debug command

(this also confirms that streamclone-ed changes are visible via
@filecache properties to in-process procedures before closing
transaction)

  $ cat > $TESTTMP/showtip.py <<EOF
  >
  > def showtip(ui, repo, hooktype, **kwargs):
  >     ui.warn(b'%s: %s\n' % (hooktype, repo[b'tip'].hex()[:12]))
  >
  > def reposetup(ui, repo):
  >     # this confirms (and ensures) that (empty) 00changelog.i
  >     # before streamclone is already cached as repo.changelog
  >     ui.setconfig(b'hooks', b'pretxnopen.showtip', showtip)
  >
  >     # this confirms that streamclone-ed changes are visible to
  >     # in-process procedures before closing transaction
  >     ui.setconfig(b'hooks', b'pretxnclose.showtip', showtip)
  >
  >     # this confirms that streamclone-ed changes are still visible
  >     # after closing transaction
  >     ui.setconfig(b'hooks', b'txnclose.showtip', showtip)
  > EOF
  $ cat >> $HGRCPATH <<EOF
  > [extensions]
  > showtip = $TESTTMP/showtip.py
  > EOF

  $ hg -R packed debugapplystreamclonebundle packed.hg
  6 files to transfer, 2.60 KB of data
  pretxnopen: 000000000000
  pretxnclose: aa35859c02ea
  transferred 2.60 KB in * seconds (* */sec) (glob)
  txnclose: aa35859c02ea

(for safety, confirm visibility of streamclone-ed changes by another
process, too)

  $ hg -R packed tip -T "{node|short}\n"
  aa35859c02ea

  $ cat >> $HGRCPATH <<EOF
  > [extensions]
  > showtip = !
  > EOF

Does not work on non-empty repo

  $ hg -R packed debugapplystreamclonebundle packed.hg
  abort: cannot apply stream clone bundle on non-empty repo
  [255]

#endif

Create partial clones

  $ rm -r empty
  $ hg init empty
  $ hg clone -r 3 test partial
  adding changesets
  adding manifests
  adding file changes
  added 4 changesets with 4 changes to 1 files
  new changesets f9ee2f85a263:eebf5a27f8ca
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg clone partial partial2
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd partial

#if repobundlerepo

Log -R full.hg in partial

  $ hg -R bundle://../full.hg log -T phases
  changeset:   8:aa35859c02ea
  tag:         tip
  phase:       draft
  parent:      3:eebf5a27f8ca
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.3m

  changeset:   7:a6a34bfa0076
  phase:       draft
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.3m

  changeset:   6:7373c1169842
  phase:       draft
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.3

  changeset:   5:1bb50a9436a7
  phase:       draft
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.2

  changeset:   4:095197eb4973
  phase:       draft
  parent:      0:f9ee2f85a263
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.1

  changeset:   3:eebf5a27f8ca
  phase:       public
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.3

  changeset:   2:e38ba6f5b7e0
  phase:       public
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.2

  changeset:   1:34c2bf6b0626
  phase:       public
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.1

  changeset:   0:f9ee2f85a263
  phase:       public
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.0


Incoming full.hg in partial

  $ hg incoming bundle://../full.hg
  comparing with bundle:../full.hg
  searching for changes
  changeset:   4:095197eb4973
  parent:      0:f9ee2f85a263
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.1

  changeset:   5:1bb50a9436a7
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.2

  changeset:   6:7373c1169842
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.3

  changeset:   7:a6a34bfa0076
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.3m

  changeset:   8:aa35859c02ea
  tag:         tip
  parent:      3:eebf5a27f8ca
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.3m


Outgoing -R full.hg vs partial2 in partial

  $ hg -R bundle://../full.hg outgoing ../partial2
  comparing with ../partial2
  searching for changes
  changeset:   4:095197eb4973
  parent:      0:f9ee2f85a263
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.1

  changeset:   5:1bb50a9436a7
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.2

  changeset:   6:7373c1169842
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.3

  changeset:   7:a6a34bfa0076
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.3m

  changeset:   8:aa35859c02ea
  tag:         tip
  parent:      3:eebf5a27f8ca
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.3m


Outgoing -R does-not-exist.hg vs partial2 in partial

  $ hg -R bundle://../does-not-exist.hg outgoing ../partial2
  abort: *../does-not-exist.hg* (glob)
  [255]

#endif

  $ cd ..

hide outer repo
  $ hg init

Direct clone from bundle (all-history)

#if repobundlerepo

  $ hg clone full.hg full-clone
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 9 changesets with 7 changes to 4 files (+1 heads)
  new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg -R full-clone heads
  changeset:   8:aa35859c02ea
  tag:         tip
  parent:      3:eebf5a27f8ca
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.3m

  changeset:   7:a6a34bfa0076
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.3m

  $ rm -r full-clone

When cloning from a non-copiable repository into '', do not
recurse infinitely (issue2528)

  $ hg clone full.hg ''
  abort: empty destination path is not valid
  [10]

test for https://bz.mercurial-scm.org/216

Unbundle incremental bundles into fresh empty in one go

  $ rm -r empty
  $ hg init empty
  $ hg -R test bundle --base null -r 0 ../0.hg
  1 changesets found
  $ hg -R test bundle --base 0 -r 1 ../1.hg
  1 changesets found
  $ hg -R empty unbundle -u ../0.hg ../1.hg
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  new changesets f9ee2f85a263 (1 drafts)
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  new changesets 34c2bf6b0626 (1 drafts)
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

View full contents of the bundle
  $ hg -R test bundle --base null -r 3 ../partial.hg
  4 changesets found
  $ cd test
  $ hg -R ../../partial.hg log -r "bundle()"
  changeset:   0:f9ee2f85a263
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.0

  changeset:   1:34c2bf6b0626
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.1

  changeset:   2:e38ba6f5b7e0
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.2

  changeset:   3:eebf5a27f8ca
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.3

  $ cd ..

#endif

test for 540d1059c802

  $ hg init orig
  $ cd orig
  $ echo foo > foo
  $ hg add foo
  $ hg ci -m 'add foo'

  $ hg clone . ../copy
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg tag foo

  $ cd ../copy
  $ echo >> foo
  $ hg ci -m 'change foo'
  $ hg bundle ../bundle.hg ../orig
  searching for changes
  1 changesets found

  $ cd ..

#if repobundlerepo
  $ cd orig
  $ hg incoming ../bundle.hg
  comparing with ../bundle.hg
  searching for changes
  changeset:   2:ed1b79f46b9a
  tag:         tip
  parent:      0:bbd179dfa0a7
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     change foo

  $ cd ..

802
802
803 $ cp bundle.hg 'test#bundle.hg'
803 $ cp bundle.hg 'test#bundle.hg'
804 $ cd orig
804 $ cd orig
805 $ hg incoming '../test#bundle.hg'
805 $ hg incoming '../test#bundle.hg'
806 comparing with ../test
806 comparing with ../test
807 abort: unknown revision 'bundle.hg'
807 abort: unknown revision 'bundle.hg'
808 [10]
808 [10]
809
809
810 note that percent encoding is not handled:
810 note that percent encoding is not handled:
811
811
812 $ hg incoming ../test%23bundle.hg
812 $ hg incoming ../test%23bundle.hg
813 abort: repository ../test%23bundle.hg not found
813 abort: repository ../test%23bundle.hg not found
814 [255]
814 [255]
815 $ cd ..
815 $ cd ..
816
816
817 #endif
817 #endif

test bundling revisions on a newly created branch (issue3828):

  $ hg -q clone -U test test-clone
  $ cd test

  $ hg -q branch foo
  $ hg commit -m "create foo branch"
  $ hg -q outgoing ../test-clone
  9:b4f5acb1ee27
  $ hg -q bundle --branch foo foo.hg ../test-clone
#if repobundlerepo
  $ hg -R foo.hg -q log -r "bundle()"
  9:b4f5acb1ee27
#endif

  $ cd ..
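
(The `bundle()` revset used above is available whenever a bundle file is
opened as a repository with `-R`: it selects exactly the changesets
stored in the bundle. A sketch, not executed by this test, querying the
full-history bundle created earlier:

  $ hg -R all.hg log -r "bundle()" --template "{rev}:{node|short}\n"

would list every changeset contained in all.hg.)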

test for https://bz.mercurial-scm.org/1144

test that verifying a bundle does not traceback

partial history bundle, fails w/ unknown parent

  $ hg -R bundle.hg verify
  abort: 00changelog@bbd179dfa0a71671c253b3ae0aa1513b60d199fa: unknown parent
  [50]

full history bundle, refuses to verify non-local repo

#if repobundlerepo
  $ hg -R all.hg verify
  abort: cannot verify bundle or remote repos
  [255]
#endif

but regular verify must continue to work

  $ hg -R orig verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  checked 2 changesets with 2 changes to 2 files

#if repobundlerepo
diff against bundle

  $ hg init b
  $ cd b
  $ hg -R ../all.hg diff -r tip
  diff -r aa35859c02ea anotherfile
  --- a/anotherfile	Thu Jan 01 00:00:00 1970 +0000
  +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
  @@ -1,4 +0,0 @@
  -0
  -1
  -2
  -3
  $ cd ..
#endif

bundle single branch

  $ hg init branchy
  $ cd branchy
  $ echo a >a
  $ echo x >x
  $ hg ci -Ama
  adding a
  adding x
  $ echo c >c
  $ echo xx >x
  $ hg ci -Amc
  adding c
  $ echo c1 >c1
  $ hg ci -Amc1
  adding c1
  $ hg up 0
  1 files updated, 0 files merged, 2 files removed, 0 files unresolved
  $ echo b >b
  $ hg ci -Amb
  adding b
  created new head
  $ echo b1 >b1
  $ echo xx >x
  $ hg ci -Amb1
  adding b1
  $ hg clone -q -r2 . part

== bundling via incoming

  $ hg in -R part --bundle incoming.hg --template "{node}\n" .
  comparing with .
  searching for changes
  1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
  057f4db07f61970e1c11e83be79e9d08adc4dc31

== bundling

  $ hg bundle bundle.hg part --debug --config progress.debug=true
  query 1; heads
  searching for changes
  all remote heads known locally
  2 changesets found
  list of changesets:
  1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
  057f4db07f61970e1c11e83be79e9d08adc4dc31
  bundle2-output-bundle: "HG20", (1 params) 2 parts total
  bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
  changesets: 1/2 chunks (50.00%)
  changesets: 2/2 chunks (100.00%)
  manifests: 1/2 chunks (50.00%)
  manifests: 2/2 chunks (100.00%)
  files: b 1/3 files (33.33%)
  files: b1 2/3 files (66.67%)
  files: x 3/3 files (100.00%)
  bundle2-output-part: "cache:rev-branch-cache" (advisory) streamed payload

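(For reference, the parts of the bundle2 file generated above can also
be inspected after the fact with `hg debugbundle`; a sketch, output
elided since it varies with compression settings:

  $ hg debugbundle bundle.hg

which reports the stream parameters and the "changegroup" and
"cache:rev-branch-cache" parts recorded in the debug output.)
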
#if repobundlerepo
== Test for issue3441

  $ hg clone -q -r0 . part2
  $ hg -q -R part2 pull bundle.hg
  $ hg -R part2 verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  checked 3 changesets with 5 changes to 4 files
#endif

== Test bundling no commits

  $ hg bundle -r 'public()' no-output.hg
  abort: no commits to bundle
  [10]

  $ cd ..

When the user merges to a revision that exists only in the bundle,
it should warn that the second parent of the working directory
does not exist

  $ hg init update2bundled
  $ cd update2bundled
  $ cat <<EOF >> .hg/hgrc
  > [extensions]
  > strip =
  > EOF
  $ echo "aaa" >> a
  $ hg commit -A -m 0
  adding a
  $ echo "bbb" >> b
  $ hg commit -A -m 1
  adding b
  $ echo "ccc" >> c
  $ hg commit -A -m 2
  adding c
  $ hg update -r 1
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ echo "ddd" >> d
  $ hg commit -A -m 3
  adding d
  created new head
  $ hg update -r 2
  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ hg log -G
  o  changeset:   3:8bd3e1f196af
  |  tag:         tip
  |  parent:      1:a01eca7af26d
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     3
  |
  | @  changeset:   2:4652c276ac4f
  |/   user:        test
  |    date:        Thu Jan 01 00:00:00 1970 +0000
  |    summary:     2
  |
  o  changeset:   1:a01eca7af26d
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     1
  |
  o  changeset:   0:4fe08cd4693e
     user:        test
     date:        Thu Jan 01 00:00:00 1970 +0000
     summary:     0


#if repobundlerepo
  $ hg bundle --base 1 -r 3 ../update2bundled.hg
  1 changesets found
  $ hg strip -r 3
  saved backup bundle to $TESTTMP/update2bundled/.hg/strip-backup/8bd3e1f196af-017e56d8-backup.hg
  $ hg merge -R ../update2bundled.hg -r 3
  setting parent to node 8bd3e1f196af289b2b121be08031e76d7ae92098 that only exists in the bundle
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (branch merge, don't forget to commit)

When the user updates to a revision that exists only in the bundle,
it should show a warning

  $ hg update -R ../update2bundled.hg --clean -r 3
  setting parent to node 8bd3e1f196af289b2b121be08031e76d7ae92098 that only exists in the bundle
  1 files updated, 0 files merged, 1 files removed, 0 files unresolved

When the user updates to a revision that exists in the local repository,
the warning should not be emitted

  $ hg update -R ../update2bundled.hg -r 0
  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
#endif

Test the option that creates a slim bundle

  $ hg bundle -a --config devel.bundle.delta=p1 ./slim.hg
  3 changesets found

Test the option that creates a bundle with no deltas
  $ hg bundle -a --config devel.bundle.delta=full ./full.hg
  3 changesets found
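
Side note: as the test names suggest, `devel.bundle.delta` selects the
delta strategy used when writing a bundle: `p1` stores each revision as
a delta against its first parent, while `full` stores every revision as
a full snapshot. The same setting can be made persistent in a config
file instead of passing `--config`; a sketch, assuming a
repository-level `.hg/hgrc`:

  [devel]
  bundle.delta = p1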

Test the debug output when applying delta
-----------------------------------------

  $ hg init foo
  $ hg -R foo unbundle ./slim.hg \
  >     --config debug.revlog.debug-delta=yes \
  >     --config storage.revlog.reuse-external-delta=no \
  >     --config storage.revlog.reuse-external-delta-parent=no
  adding changesets
  DBG-DELTAS: CHANGELOG: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
  DBG-DELTAS: CHANGELOG: rev=1: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
  DBG-DELTAS: CHANGELOG: rev=2: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
  adding manifests
  DBG-DELTAS: MANIFESTLOG: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
  DBG-DELTAS: MANIFESTLOG: rev=1: search-rounds=1 try-count=1 - delta-type=delta snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
  DBG-DELTAS: MANIFESTLOG: rev=2: search-rounds=1 try-count=1 - delta-type=delta snap-depth=0 - p1-chain-length=1 p2-chain-length=-1 - duration=* (glob)
  adding file changes
  DBG-DELTAS: FILELOG:a: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
  DBG-DELTAS: FILELOG:b: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
  DBG-DELTAS: FILELOG:c: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
  added 3 changesets with 3 changes to 3 files
  new changesets 4fe08cd4693e:4652c276ac4f (3 drafts)
  (run 'hg update' to get a working copy)

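Side note: each DBG-DELTAS line reports, for one revision being stored,
how many search rounds were run and candidate deltas were tried
(search-rounds, try-count), what was ultimately written (delta-type is
either a `full` snapshot or a `delta`, with snap-depth giving the
snapshot depth), the delta-chain lengths of the two parents (seemingly
-1 when the parent is the null revision), and the time spent. Besides
the `--config` flag used above, the option can be enabled persistently;
a sketch, assuming a user-level or repository-level hgrc:

  [debug]
  revlog.debug-delta = yes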