rust-index: add a `experimental.rust.index` option to use the wrapper...
Georges Racinet
r44466:8042856c default
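This changeset registers a new boolean option, `experimental.rust.index` (default: off), so that the revlog code can opt into the Rust-backed index wrapper when Mercurial is built with Rust support. Users would turn it on with `rust.index = true` under the `[experimental]` section of an hgrc. As a sketch of how such a registered item is consulted (this is the usual `ui.configbool` pattern, not the exact call site touched by this series, and `use_rust_index` is a hypothetical helper name):

    def use_rust_index(ui):
        # configbool() falls back to the default declared in
        # configitems.py, i.e. False until explicitly enabled.
        return ui.configbool(b'experimental', b'rust.index')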
mercurial/configitems.py
@@ -1,1552 +1,1555 @@
 # configitems.py - centralized declaration of configuration option
 #
 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import functools
 import re

 from . import (
     encoding,
     error,
 )


 def loadconfigtable(ui, extname, configtable):
     """update config item known to the ui with the extension ones"""
     for section, items in sorted(configtable.items()):
         knownitems = ui._knownconfig.setdefault(section, itemregister())
         knownkeys = set(knownitems)
         newkeys = set(items)
         for key in sorted(knownkeys & newkeys):
             msg = b"extension '%s' overwrite config item '%s.%s'"
             msg %= (extname, section, key)
             ui.develwarn(msg, config=b'warn-config')

         knownitems.update(items)


 class configitem(object):
     """represent a known config item

     :section: the official config section where to find this item,
     :name: the official name within the section,
     :default: default value for this item,
     :alias: optional list of tuples as alternatives,
     :generic: this is a generic definition, match name using regular expression.
     """

     def __init__(
         self,
         section,
         name,
         default=None,
         alias=(),
         generic=False,
         priority=0,
         experimental=False,
     ):
         self.section = section
         self.name = name
         self.default = default
         self.alias = list(alias)
         self.generic = generic
         self.priority = priority
         self.experimental = experimental
         self._re = None
         if generic:
             self._re = re.compile(self.name)


 class itemregister(dict):
     """A specialized dictionary that can handle wild-card selection"""

     def __init__(self):
         super(itemregister, self).__init__()
         self._generics = set()

     def update(self, other):
         super(itemregister, self).update(other)
         self._generics.update(other._generics)

     def __setitem__(self, key, item):
         super(itemregister, self).__setitem__(key, item)
         if item.generic:
             self._generics.add(item)

     def get(self, key):
         baseitem = super(itemregister, self).get(key)
         if baseitem is not None and not baseitem.generic:
             return baseitem

         # search for a matching generic item
         generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
         for item in generics:
             # we use 'match' instead of 'search' to make the matching simpler
             # for people unfamiliar with regular expression. Having the match
             # rooted to the start of the string will produce less surprising
             # result for user writing simple regex for sub-attribute.
             #
             # For example using "color\..*" match produces an unsurprising
             # result, while using search could suddenly match apparently
             # unrelated configuration that happens to contains "color."
             # anywhere. This is a tradeoff where we favor requiring ".*" on
             # some match to avoid the need to prefix most pattern with "^".
             # The "^" seems more error prone.
             if item._re.match(key):
                 return item

         return None


 coreitems = {}


 def _register(configtable, *args, **kwargs):
     item = configitem(*args, **kwargs)
     section = configtable.setdefault(item.section, itemregister())
     if item.name in section:
         msg = b"duplicated config item registration for '%s.%s'"
         raise error.ProgrammingError(msg % (item.section, item.name))
     section[item.name] = item


 # special value for case where the default is derived from other values
 dynamicdefault = object()

 # Registering actual config items


 def getitemregister(configtable):
     f = functools.partial(_register, configtable)
     # export pseudo enum as configitem.*
     f.dynamicdefault = dynamicdefault
     return f


 coreconfigitem = getitemregister(coreitems)


 def _registerdiffopts(section, configprefix=b''):
     coreconfigitem(
         section, configprefix + b'nodates', default=False,
     )
     coreconfigitem(
         section, configprefix + b'showfunc', default=False,
     )
     coreconfigitem(
         section, configprefix + b'unified', default=None,
     )
     coreconfigitem(
         section, configprefix + b'git', default=False,
     )
     coreconfigitem(
         section, configprefix + b'ignorews', default=False,
     )
     coreconfigitem(
         section, configprefix + b'ignorewsamount', default=False,
     )
     coreconfigitem(
         section, configprefix + b'ignoreblanklines', default=False,
     )
     coreconfigitem(
         section, configprefix + b'ignorewseol', default=False,
     )
     coreconfigitem(
         section, configprefix + b'nobinary', default=False,
     )
     coreconfigitem(
         section, configprefix + b'noprefix', default=False,
     )
     coreconfigitem(
         section, configprefix + b'word-diff', default=False,
     )


 coreconfigitem(
     b'alias', b'.*', default=dynamicdefault, generic=True,
 )
 coreconfigitem(
     b'auth', b'cookiefile', default=None,
 )
 _registerdiffopts(section=b'annotate')
 # bookmarks.pushing: internal hack for discovery
 coreconfigitem(
     b'bookmarks', b'pushing', default=list,
 )
 # bundle.mainreporoot: internal hack for bundlerepo
 coreconfigitem(
     b'bundle', b'mainreporoot', default=b'',
 )
 coreconfigitem(
     b'censor', b'policy', default=b'abort', experimental=True,
 )
 coreconfigitem(
     b'chgserver', b'idletimeout', default=3600,
 )
 coreconfigitem(
     b'chgserver', b'skiphash', default=False,
 )
 coreconfigitem(
     b'cmdserver', b'log', default=None,
 )
 coreconfigitem(
     b'cmdserver', b'max-log-files', default=7,
 )
 coreconfigitem(
     b'cmdserver', b'max-log-size', default=b'1 MB',
 )
 coreconfigitem(
     b'cmdserver', b'max-repo-cache', default=0, experimental=True,
 )
 coreconfigitem(
     b'cmdserver', b'message-encodings', default=list, experimental=True,
 )
 coreconfigitem(
     b'cmdserver',
     b'track-log',
     default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
 )
 coreconfigitem(
     b'color', b'.*', default=None, generic=True,
 )
 coreconfigitem(
     b'color', b'mode', default=b'auto',
 )
 coreconfigitem(
     b'color', b'pagermode', default=dynamicdefault,
 )
 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
 coreconfigitem(
     b'commands', b'commit.post-status', default=False,
 )
 coreconfigitem(
     b'commands', b'grep.all-files', default=False, experimental=True,
 )
 coreconfigitem(
     b'commands', b'merge.require-rev', default=False,
 )
 coreconfigitem(
     b'commands', b'push.require-revs', default=False,
 )
 coreconfigitem(
     b'commands', b'resolve.confirm', default=False,
 )
 coreconfigitem(
     b'commands', b'resolve.explicit-re-merge', default=False,
 )
 coreconfigitem(
     b'commands', b'resolve.mark-check', default=b'none',
 )
 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
 coreconfigitem(
     b'commands', b'show.aliasprefix', default=list,
 )
 coreconfigitem(
     b'commands', b'status.relative', default=False,
 )
 coreconfigitem(
     b'commands', b'status.skipstates', default=[], experimental=True,
 )
 coreconfigitem(
     b'commands', b'status.terse', default=b'',
 )
 coreconfigitem(
     b'commands', b'status.verbose', default=False,
 )
 coreconfigitem(
     b'commands', b'update.check', default=None,
 )
 coreconfigitem(
     b'commands', b'update.requiredest', default=False,
 )
 coreconfigitem(
     b'committemplate', b'.*', default=None, generic=True,
 )
 coreconfigitem(
     b'convert', b'bzr.saverev', default=True,
 )
 coreconfigitem(
     b'convert', b'cvsps.cache', default=True,
 )
 coreconfigitem(
     b'convert', b'cvsps.fuzz', default=60,
 )
 coreconfigitem(
     b'convert', b'cvsps.logencoding', default=None,
 )
 coreconfigitem(
     b'convert', b'cvsps.mergefrom', default=None,
 )
 coreconfigitem(
     b'convert', b'cvsps.mergeto', default=None,
 )
 coreconfigitem(
     b'convert', b'git.committeractions', default=lambda: [b'messagedifferent'],
 )
 coreconfigitem(
     b'convert', b'git.extrakeys', default=list,
 )
 coreconfigitem(
     b'convert', b'git.findcopiesharder', default=False,
 )
 coreconfigitem(
     b'convert', b'git.remoteprefix', default=b'remote',
 )
 coreconfigitem(
     b'convert', b'git.renamelimit', default=400,
 )
 coreconfigitem(
     b'convert', b'git.saverev', default=True,
 )
 coreconfigitem(
     b'convert', b'git.similarity', default=50,
 )
 coreconfigitem(
     b'convert', b'git.skipsubmodules', default=False,
 )
 coreconfigitem(
     b'convert', b'hg.clonebranches', default=False,
 )
 coreconfigitem(
     b'convert', b'hg.ignoreerrors', default=False,
 )
 coreconfigitem(
     b'convert', b'hg.preserve-hash', default=False,
 )
 coreconfigitem(
     b'convert', b'hg.revs', default=None,
 )
 coreconfigitem(
     b'convert', b'hg.saverev', default=False,
 )
 coreconfigitem(
     b'convert', b'hg.sourcename', default=None,
 )
 coreconfigitem(
     b'convert', b'hg.startrev', default=None,
 )
 coreconfigitem(
     b'convert', b'hg.tagsbranch', default=b'default',
 )
 coreconfigitem(
     b'convert', b'hg.usebranchnames', default=True,
 )
 coreconfigitem(
     b'convert', b'ignoreancestorcheck', default=False, experimental=True,
 )
 coreconfigitem(
     b'convert', b'localtimezone', default=False,
 )
 coreconfigitem(
     b'convert', b'p4.encoding', default=dynamicdefault,
 )
 coreconfigitem(
     b'convert', b'p4.startrev', default=0,
 )
 coreconfigitem(
     b'convert', b'skiptags', default=False,
 )
 coreconfigitem(
     b'convert', b'svn.debugsvnlog', default=True,
 )
 coreconfigitem(
     b'convert', b'svn.trunk', default=None,
 )
 coreconfigitem(
     b'convert', b'svn.tags', default=None,
 )
 coreconfigitem(
     b'convert', b'svn.branches', default=None,
 )
 coreconfigitem(
     b'convert', b'svn.startrev', default=0,
 )
 coreconfigitem(
     b'debug', b'dirstate.delaywrite', default=0,
 )
 coreconfigitem(
     b'defaults', b'.*', default=None, generic=True,
 )
 coreconfigitem(
     b'devel', b'all-warnings', default=False,
 )
 coreconfigitem(
     b'devel', b'bundle2.debug', default=False,
 )
 coreconfigitem(
     b'devel', b'bundle.delta', default=b'',
 )
 coreconfigitem(
     b'devel', b'cache-vfs', default=None,
 )
 coreconfigitem(
     b'devel', b'check-locks', default=False,
 )
 coreconfigitem(
     b'devel', b'check-relroot', default=False,
 )
 coreconfigitem(
     b'devel', b'default-date', default=None,
 )
 coreconfigitem(
     b'devel', b'deprec-warn', default=False,
 )
 coreconfigitem(
     b'devel', b'disableloaddefaultcerts', default=False,
 )
 coreconfigitem(
     b'devel', b'warn-empty-changegroup', default=False,
 )
 coreconfigitem(
     b'devel', b'legacy.exchange', default=list,
 )
 coreconfigitem(
     b'devel', b'servercafile', default=b'',
 )
 coreconfigitem(
     b'devel', b'serverexactprotocol', default=b'',
 )
 coreconfigitem(
     b'devel', b'serverrequirecert', default=False,
 )
 coreconfigitem(
     b'devel', b'strip-obsmarkers', default=True,
 )
 coreconfigitem(
     b'devel', b'warn-config', default=None,
 )
 coreconfigitem(
     b'devel', b'warn-config-default', default=None,
 )
 coreconfigitem(
     b'devel', b'user.obsmarker', default=None,
 )
 coreconfigitem(
     b'devel', b'warn-config-unknown', default=None,
 )
 coreconfigitem(
     b'devel', b'debug.copies', default=False,
 )
 coreconfigitem(
     b'devel', b'debug.extensions', default=False,
 )
 coreconfigitem(
     b'devel', b'debug.repo-filters', default=False,
 )
 coreconfigitem(
     b'devel', b'debug.peer-request', default=False,
 )
 coreconfigitem(
     b'devel', b'discovery.randomize', default=True,
 )
 _registerdiffopts(section=b'diff')
 coreconfigitem(
     b'email', b'bcc', default=None,
 )
 coreconfigitem(
     b'email', b'cc', default=None,
 )
 coreconfigitem(
     b'email', b'charsets', default=list,
 )
 coreconfigitem(
     b'email', b'from', default=None,
 )
 coreconfigitem(
     b'email', b'method', default=b'smtp',
 )
 coreconfigitem(
     b'email', b'reply-to', default=None,
 )
 coreconfigitem(
     b'email', b'to', default=None,
 )
 coreconfigitem(
     b'experimental', b'archivemetatemplate', default=dynamicdefault,
 )
 coreconfigitem(
     b'experimental', b'auto-publish', default=b'publish',
 )
 coreconfigitem(
     b'experimental', b'bundle-phases', default=False,
 )
 coreconfigitem(
     b'experimental', b'bundle2-advertise', default=True,
 )
 coreconfigitem(
     b'experimental', b'bundle2-output-capture', default=False,
 )
 coreconfigitem(
     b'experimental', b'bundle2.pushback', default=False,
 )
 coreconfigitem(
     b'experimental', b'bundle2lazylocking', default=False,
 )
 coreconfigitem(
     b'experimental', b'bundlecomplevel', default=None,
 )
 coreconfigitem(
     b'experimental', b'bundlecomplevel.bzip2', default=None,
 )
 coreconfigitem(
     b'experimental', b'bundlecomplevel.gzip', default=None,
 )
 coreconfigitem(
     b'experimental', b'bundlecomplevel.none', default=None,
 )
 coreconfigitem(
     b'experimental', b'bundlecomplevel.zstd', default=None,
 )
 coreconfigitem(
     b'experimental', b'changegroup3', default=False,
 )
 coreconfigitem(
     b'experimental', b'cleanup-as-archived', default=False,
 )
 coreconfigitem(
     b'experimental', b'clientcompressionengines', default=list,
 )
 coreconfigitem(
     b'experimental', b'copytrace', default=b'on',
 )
 coreconfigitem(
     b'experimental', b'copytrace.movecandidateslimit', default=100,
 )
 coreconfigitem(
     b'experimental', b'copytrace.sourcecommitlimit', default=100,
 )
 coreconfigitem(
     b'experimental', b'copies.read-from', default=b"filelog-only",
 )
 coreconfigitem(
     b'experimental', b'copies.write-to', default=b'filelog-only',
 )
 coreconfigitem(
     b'experimental', b'crecordtest', default=None,
 )
 coreconfigitem(
     b'experimental', b'directaccess', default=False,
 )
 coreconfigitem(
     b'experimental', b'directaccess.revnums', default=False,
 )
 coreconfigitem(
     b'experimental', b'editortmpinhg', default=False,
 )
 coreconfigitem(
     b'experimental', b'evolution', default=list,
 )
 coreconfigitem(
     b'experimental',
     b'evolution.allowdivergence',
     default=False,
     alias=[(b'experimental', b'allowdivergence')],
 )
 coreconfigitem(
     b'experimental', b'evolution.allowunstable', default=None,
 )
 coreconfigitem(
     b'experimental', b'evolution.createmarkers', default=None,
 )
 coreconfigitem(
     b'experimental',
     b'evolution.effect-flags',
     default=True,
     alias=[(b'experimental', b'effect-flags')],
 )
 coreconfigitem(
     b'experimental', b'evolution.exchange', default=None,
 )
 coreconfigitem(
     b'experimental', b'evolution.bundle-obsmarker', default=False,
 )
 coreconfigitem(
     b'experimental', b'log.topo', default=False,
 )
 coreconfigitem(
     b'experimental', b'evolution.report-instabilities', default=True,
 )
 coreconfigitem(
     b'experimental', b'evolution.track-operation', default=True,
 )
 # repo-level config to exclude a revset visibility
 #
 # The target use case is to use `share` to expose different subset of the same
 # repository, especially server side. See also `server.view`.
 coreconfigitem(
     b'experimental', b'extra-filter-revs', default=None,
 )
 coreconfigitem(
     b'experimental', b'maxdeltachainspan', default=-1,
 )
 coreconfigitem(
     b'experimental', b'mergetempdirprefix', default=None,
 )
 coreconfigitem(
     b'experimental', b'mmapindexthreshold', default=None,
 )
 coreconfigitem(
     b'experimental', b'narrow', default=False,
 )
 coreconfigitem(
     b'experimental', b'nonnormalparanoidcheck', default=False,
 )
 coreconfigitem(
     b'experimental', b'exportableenviron', default=list,
 )
 coreconfigitem(
     b'experimental', b'extendedheader.index', default=None,
 )
 coreconfigitem(
     b'experimental', b'extendedheader.similarity', default=False,
 )
 coreconfigitem(
     b'experimental', b'graphshorten', default=False,
 )
 coreconfigitem(
     b'experimental', b'graphstyle.parent', default=dynamicdefault,
 )
 coreconfigitem(
     b'experimental', b'graphstyle.missing', default=dynamicdefault,
 )
 coreconfigitem(
     b'experimental', b'graphstyle.grandparent', default=dynamicdefault,
 )
 coreconfigitem(
     b'experimental', b'hook-track-tags', default=False,
 )
 coreconfigitem(
     b'experimental', b'httppeer.advertise-v2', default=False,
 )
 coreconfigitem(
     b'experimental', b'httppeer.v2-encoder-order', default=None,
 )
 coreconfigitem(
     b'experimental', b'httppostargs', default=False,
 )
 coreconfigitem(
     b'experimental', b'mergedriver', default=None,
 )
 coreconfigitem(b'experimental', b'nointerrupt', default=False)
 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)

 coreconfigitem(
     b'experimental', b'obsmarkers-exchange-debug', default=False,
 )
 coreconfigitem(
     b'experimental', b'remotenames', default=False,
 )
 coreconfigitem(
     b'experimental', b'removeemptydirs', default=True,
 )
 coreconfigitem(
     b'experimental', b'revert.interactive.select-to-keep', default=False,
 )
 coreconfigitem(
     b'experimental', b'revisions.prefixhexnode', default=False,
 )
 coreconfigitem(
     b'experimental', b'revlogv2', default=None,
 )
 coreconfigitem(
     b'experimental', b'revisions.disambiguatewithin', default=None,
 )
 coreconfigitem(
+    b'experimental', b'rust.index', default=False,
+)
+coreconfigitem(
     b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
 )
 coreconfigitem(
     b'experimental',
     b'server.manifestdata.recommended-batch-size',
     default=100000,
 )
 coreconfigitem(
     b'experimental', b'server.stream-narrow-clones', default=False,
 )
 coreconfigitem(
     b'experimental', b'single-head-per-branch', default=False,
 )
 coreconfigitem(
     b'experimental',
     b'single-head-per-branch:account-closed-heads',
     default=False,
 )
 coreconfigitem(
     b'experimental', b'sshserver.support-v2', default=False,
 )
 coreconfigitem(
     b'experimental', b'sparse-read', default=False,
 )
 coreconfigitem(
     b'experimental', b'sparse-read.density-threshold', default=0.50,
 )
 coreconfigitem(
     b'experimental', b'sparse-read.min-gap-size', default=b'65K',
 )
 coreconfigitem(
     b'experimental', b'treemanifest', default=False,
 )
 coreconfigitem(
     b'experimental', b'update.atomic-file', default=False,
 )
 coreconfigitem(
     b'experimental', b'sshpeer.advertise-v2', default=False,
 )
 coreconfigitem(
     b'experimental', b'web.apiserver', default=False,
 )
 coreconfigitem(
     b'experimental', b'web.api.http-v2', default=False,
 )
 coreconfigitem(
     b'experimental', b'web.api.debugreflect', default=False,
 )
 coreconfigitem(
     b'experimental', b'worker.wdir-get-thread-safe', default=False,
 )
 coreconfigitem(
     b'experimental', b'worker.repository-upgrade', default=False,
 )
 coreconfigitem(
     b'experimental', b'xdiff', default=False,
 )
 coreconfigitem(
     b'extensions', b'.*', default=None, generic=True,
 )
 coreconfigitem(
     b'extdata', b'.*', default=None, generic=True,
 )
 coreconfigitem(
     b'format', b'bookmarks-in-store', default=False,
 )
 coreconfigitem(
     b'format', b'chunkcachesize', default=None, experimental=True,
 )
 coreconfigitem(
     b'format', b'dotencode', default=True,
 )
 coreconfigitem(
     b'format', b'generaldelta', default=False, experimental=True,
 )
 coreconfigitem(
     b'format', b'manifestcachesize', default=None, experimental=True,
 )
 coreconfigitem(
     b'format', b'maxchainlen', default=dynamicdefault, experimental=True,
 )
 coreconfigitem(
     b'format', b'obsstore-version', default=None,
 )
 coreconfigitem(
     b'format', b'sparse-revlog', default=True,
 )
 coreconfigitem(
     b'format',
     b'revlog-compression',
     default=b'zlib',
     alias=[(b'experimental', b'format.compression')],
 )
 coreconfigitem(
     b'format', b'usefncache', default=True,
 )
 coreconfigitem(
     b'format', b'usegeneraldelta', default=True,
 )
 coreconfigitem(
     b'format', b'usestore', default=True,
 )
 coreconfigitem(
     b'format',
     b'exp-use-copies-side-data-changeset',
     default=False,
     experimental=True,
 )
 coreconfigitem(
     b'format', b'exp-use-side-data', default=False, experimental=True,
 )
 coreconfigitem(
     b'format', b'internal-phase', default=False, experimental=True,
 )
 coreconfigitem(
     b'fsmonitor', b'warn_when_unused', default=True,
 )
 coreconfigitem(
     b'fsmonitor', b'warn_update_file_count', default=50000,
 )
 coreconfigitem(
     b'help', br'hidden-command\..*', default=False, generic=True,
 )
 coreconfigitem(
     b'help', br'hidden-topic\..*', default=False, generic=True,
 )
 coreconfigitem(
     b'hooks', b'.*', default=dynamicdefault, generic=True,
 )
 coreconfigitem(
     b'hgweb-paths', b'.*', default=list, generic=True,
 )
 coreconfigitem(
     b'hostfingerprints', b'.*', default=list, generic=True,
 )
 coreconfigitem(
     b'hostsecurity', b'ciphers', default=None,
 )
 coreconfigitem(
     b'hostsecurity', b'disabletls10warning', default=False,
 )
 coreconfigitem(
     b'hostsecurity', b'minimumprotocol', default=dynamicdefault,
 )
 coreconfigitem(
     b'hostsecurity',
     b'.*:minimumprotocol$',
     default=dynamicdefault,
     generic=True,
 )
 coreconfigitem(
     b'hostsecurity', b'.*:ciphers$', default=dynamicdefault, generic=True,
 )
 coreconfigitem(
     b'hostsecurity', b'.*:fingerprints$', default=list, generic=True,
 )
 coreconfigitem(
     b'hostsecurity', b'.*:verifycertsfile$', default=None, generic=True,
 )

 coreconfigitem(
     b'http_proxy', b'always', default=False,
 )
 coreconfigitem(
     b'http_proxy', b'host', default=None,
 )
 coreconfigitem(
     b'http_proxy', b'no', default=list,
 )
 coreconfigitem(
     b'http_proxy', b'passwd', default=None,
 )
 coreconfigitem(
     b'http_proxy', b'user', default=None,
 )

 coreconfigitem(
     b'http', b'timeout', default=None,
 )

 coreconfigitem(
     b'logtoprocess', b'commandexception', default=None,
 )
 coreconfigitem(
     b'logtoprocess', b'commandfinish', default=None,
 )
 coreconfigitem(
     b'logtoprocess', b'command', default=None,
 )
 coreconfigitem(
     b'logtoprocess', b'develwarn', default=None,
 )
 coreconfigitem(
     b'logtoprocess', b'uiblocked', default=None,
 )
 coreconfigitem(
     b'merge', b'checkunknown', default=b'abort',
 )
 coreconfigitem(
     b'merge', b'checkignored', default=b'abort',
 )
 coreconfigitem(
     b'experimental', b'merge.checkpathconflicts', default=False,
 )
 coreconfigitem(
     b'merge', b'followcopies', default=True,
 )
 coreconfigitem(
     b'merge', b'on-failure', default=b'continue',
 )
 coreconfigitem(
     b'merge', b'preferancestor', default=lambda: [b'*'], experimental=True,
 )
 coreconfigitem(
     b'merge', b'strict-capability-check', default=False,
 )
 coreconfigitem(
     b'merge-tools', b'.*', default=None, generic=True,
 )
 coreconfigitem(
     b'merge-tools',
     br'.*\.args$',
     default=b"$local $base $other",
     generic=True,
     priority=-1,
 )
 coreconfigitem(
     b'merge-tools', br'.*\.binary$', default=False, generic=True, priority=-1,
 )
 coreconfigitem(
     b'merge-tools', br'.*\.check$', default=list, generic=True, priority=-1,
 )
 coreconfigitem(
     b'merge-tools',
     br'.*\.checkchanged$',
     default=False,
     generic=True,
     priority=-1,
 )
 coreconfigitem(
     b'merge-tools',
     br'.*\.executable$',
     default=dynamicdefault,
     generic=True,
     priority=-1,
 )
 coreconfigitem(
     b'merge-tools', br'.*\.fixeol$', default=False, generic=True, priority=-1,
 )
 coreconfigitem(
     b'merge-tools', br'.*\.gui$', default=False, generic=True, priority=-1,
 )
 coreconfigitem(
     b'merge-tools',
     br'.*\.mergemarkers$',
     default=b'basic',
     generic=True,
     priority=-1,
 )
 coreconfigitem(
     b'merge-tools',
     br'.*\.mergemarkertemplate$',
     default=dynamicdefault,  # take from ui.mergemarkertemplate
     generic=True,
     priority=-1,
 )
 coreconfigitem(
     b'merge-tools', br'.*\.priority$', default=0, generic=True, priority=-1,
 )
 coreconfigitem(
     b'merge-tools',
     br'.*\.premerge$',
     default=dynamicdefault,
     generic=True,
     priority=-1,
 )
 coreconfigitem(
     b'merge-tools', br'.*\.symlink$', default=False, generic=True, priority=-1,
 )
 coreconfigitem(
     b'pager', b'attend-.*', default=dynamicdefault, generic=True,
 )
 coreconfigitem(
     b'pager', b'ignore', default=list,
 )
 coreconfigitem(
     b'pager', b'pager', default=dynamicdefault,
 )
 coreconfigitem(
     b'patch', b'eol', default=b'strict',
 )
 coreconfigitem(
     b'patch', b'fuzz', default=2,
 )
 coreconfigitem(
     b'paths', b'default', default=None,
 )
 coreconfigitem(
     b'paths', b'default-push', default=None,
 )
 coreconfigitem(
     b'paths', b'.*', default=None, generic=True,
 )
 coreconfigitem(
     b'phases', b'checksubrepos', default=b'follow',
 )
 coreconfigitem(
     b'phases', b'new-commit', default=b'draft',
 )
 coreconfigitem(
     b'phases', b'publish', default=True,
 )
 coreconfigitem(
     b'profiling', b'enabled', default=False,
 )
 coreconfigitem(
     b'profiling', b'format', default=b'text',
 )
 coreconfigitem(
     b'profiling', b'freq', default=1000,
 )
 coreconfigitem(
     b'profiling', b'limit', default=30,
 )
 coreconfigitem(
     b'profiling', b'nested', default=0,
 )
 coreconfigitem(
     b'profiling', b'output', default=None,
 )
 coreconfigitem(
     b'profiling', b'showmax', default=0.999,
 )
 coreconfigitem(
     b'profiling', b'showmin', default=dynamicdefault,
 )
996 coreconfigitem(
999 coreconfigitem(
997 b'profiling', b'showtime', default=True,
1000 b'profiling', b'showtime', default=True,
998 )
1001 )
999 coreconfigitem(
1002 coreconfigitem(
1000 b'profiling', b'sort', default=b'inlinetime',
1003 b'profiling', b'sort', default=b'inlinetime',
1001 )
1004 )
1002 coreconfigitem(
1005 coreconfigitem(
1003 b'profiling', b'statformat', default=b'hotpath',
1006 b'profiling', b'statformat', default=b'hotpath',
1004 )
1007 )
1005 coreconfigitem(
1008 coreconfigitem(
1006 b'profiling', b'time-track', default=dynamicdefault,
1009 b'profiling', b'time-track', default=dynamicdefault,
1007 )
1010 )
1008 coreconfigitem(
1011 coreconfigitem(
1009 b'profiling', b'type', default=b'stat',
1012 b'profiling', b'type', default=b'stat',
1010 )
1013 )
coreconfigitem(
    b'progress', b'assume-tty', default=False,
)
coreconfigitem(
    b'progress', b'changedelay', default=1,
)
coreconfigitem(
    b'progress', b'clear-complete', default=True,
)
coreconfigitem(
    b'progress', b'debug', default=False,
)
coreconfigitem(
    b'progress', b'delay', default=3,
)
coreconfigitem(
    b'progress', b'disable', default=False,
)
coreconfigitem(
    b'progress', b'estimateinterval', default=60.0,
)
coreconfigitem(
    b'progress',
    b'format',
    default=lambda: [b'topic', b'bar', b'number', b'estimate'],
)
coreconfigitem(
    b'progress', b'refresh', default=0.1,
)
coreconfigitem(
    b'progress', b'width', default=dynamicdefault,
)
coreconfigitem(
    b'push', b'pushvars.server', default=False,
)
coreconfigitem(
    b'rewrite',
    b'backup-bundle',
    default=True,
    alias=[(b'ui', b'history-editing-backup')],
)
coreconfigitem(
    b'rewrite', b'update-timestamp', default=False,
)
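
# For illustration (not part of the original module): setting, e.g.
#
#   [rewrite]
#   update-timestamp = yes
#
# makes history-rewriting commands (e.g. "hg amend") give the rewritten
# changeset a fresh date instead of keeping the original one.
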
coreconfigitem(
    b'storage', b'new-repo-backend', default=b'revlogv1', experimental=True,
)
coreconfigitem(
    b'storage',
    b'revlog.optimize-delta-parent-choice',
    default=True,
    alias=[(b'format', b'aggressivemergedeltas')],
)
coreconfigitem(
    b'storage', b'revlog.reuse-external-delta', default=True,
)
coreconfigitem(
    b'storage', b'revlog.reuse-external-delta-parent', default=None,
)
coreconfigitem(
    b'storage', b'revlog.zlib.level', default=None,
)
coreconfigitem(
    b'storage', b'revlog.zstd.level', default=None,
)
coreconfigitem(
    b'server', b'bookmarks-pushkey-compat', default=True,
)
coreconfigitem(
    b'server', b'bundle1', default=True,
)
coreconfigitem(
    b'server', b'bundle1gd', default=None,
)
coreconfigitem(
    b'server', b'bundle1.pull', default=None,
)
coreconfigitem(
    b'server', b'bundle1gd.pull', default=None,
)
coreconfigitem(
    b'server', b'bundle1.push', default=None,
)
coreconfigitem(
    b'server', b'bundle1gd.push', default=None,
)
coreconfigitem(
    b'server',
    b'bundle2.stream',
    default=True,
    alias=[(b'experimental', b'bundle2.stream')],
)
coreconfigitem(
    b'server', b'compressionengines', default=list,
)
coreconfigitem(
    b'server', b'concurrent-push-mode', default=b'strict',
)
coreconfigitem(
    b'server', b'disablefullbundle', default=False,
)
coreconfigitem(
    b'server', b'maxhttpheaderlen', default=1024,
)
coreconfigitem(
    b'server', b'pullbundle', default=False,
)
coreconfigitem(
    b'server', b'preferuncompressed', default=False,
)
coreconfigitem(
    b'server', b'streamunbundle', default=False,
)
coreconfigitem(
    b'server', b'uncompressed', default=True,
)
coreconfigitem(
    b'server', b'uncompressedallowsecret', default=False,
)
coreconfigitem(
    b'server', b'view', default=b'served',
)
coreconfigitem(
    b'server', b'validate', default=False,
)
coreconfigitem(
    b'server', b'zliblevel', default=-1,
)
coreconfigitem(
    b'server', b'zstdlevel', default=3,
)
coreconfigitem(
    b'share', b'pool', default=None,
)
coreconfigitem(
    b'share', b'poolnaming', default=b'identity',
)
coreconfigitem(
    b'shelve', b'maxbackups', default=10,
)
coreconfigitem(
    b'smtp', b'host', default=None,
)
coreconfigitem(
    b'smtp', b'local_hostname', default=None,
)
coreconfigitem(
    b'smtp', b'password', default=None,
)
coreconfigitem(
    b'smtp', b'port', default=dynamicdefault,
)
coreconfigitem(
    b'smtp', b'tls', default=b'none',
)
coreconfigitem(
    b'smtp', b'username', default=None,
)
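
# For illustration (not part of the original module): these [smtp] options
# are consumed by mail-sending code (e.g. the patchbomb extension), e.g.:
#
#   [smtp]
#   host = smtp.example.com
#   port = 587
#   tls = starttls
#   username = alice
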
coreconfigitem(
    b'sparse', b'missingwarning', default=True, experimental=True,
)
coreconfigitem(
    b'subrepos',
    b'allowed',
    default=dynamicdefault,  # to make backporting simpler
)
coreconfigitem(
    b'subrepos', b'hg:allowed', default=dynamicdefault,
)
coreconfigitem(
    b'subrepos', b'git:allowed', default=dynamicdefault,
)
coreconfigitem(
    b'subrepos', b'svn:allowed', default=dynamicdefault,
)
coreconfigitem(
    b'templates', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'templateconfig', b'.*', default=dynamicdefault, generic=True,
)
coreconfigitem(
    b'trusted', b'groups', default=list,
)
coreconfigitem(
    b'trusted', b'users', default=list,
)
coreconfigitem(
    b'ui', b'_usedassubrepo', default=False,
)
coreconfigitem(
    b'ui', b'allowemptycommit', default=False,
)
coreconfigitem(
    b'ui', b'archivemeta', default=True,
)
coreconfigitem(
    b'ui', b'askusername', default=False,
)
coreconfigitem(
    b'ui', b'clonebundlefallback', default=False,
)
coreconfigitem(
    b'ui', b'clonebundleprefers', default=list,
)
coreconfigitem(
    b'ui', b'clonebundles', default=True,
)
coreconfigitem(
    b'ui', b'color', default=b'auto',
)
coreconfigitem(
    b'ui', b'commitsubrepos', default=False,
)
coreconfigitem(
    b'ui', b'debug', default=False,
)
coreconfigitem(
    b'ui', b'debugger', default=None,
)
coreconfigitem(
    b'ui', b'editor', default=dynamicdefault,
)
coreconfigitem(
    b'ui', b'fallbackencoding', default=None,
)
coreconfigitem(
    b'ui', b'forcecwd', default=None,
)
coreconfigitem(
    b'ui', b'forcemerge', default=None,
)
coreconfigitem(
    b'ui', b'formatdebug', default=False,
)
coreconfigitem(
    b'ui', b'formatjson', default=False,
)
coreconfigitem(
    b'ui', b'formatted', default=None,
)
coreconfigitem(
    b'ui', b'graphnodetemplate', default=None,
)
coreconfigitem(
    b'ui', b'interactive', default=None,
)
coreconfigitem(
    b'ui', b'interface', default=None,
)
coreconfigitem(
    b'ui', b'interface.chunkselector', default=None,
)
coreconfigitem(
    b'ui', b'large-file-limit', default=10000000,
)
coreconfigitem(
    b'ui', b'logblockedtimes', default=False,
)
coreconfigitem(
    b'ui', b'logtemplate', default=None,
)
coreconfigitem(
    b'ui', b'merge', default=None,
)
coreconfigitem(
    b'ui', b'mergemarkers', default=b'basic',
)
coreconfigitem(
    b'ui',
    b'mergemarkertemplate',
    default=(
        b'{node|short} '
        b'{ifeq(tags, "tip", "", '
        b'ifeq(tags, "", "", "{tags} "))}'
        b'{if(bookmarks, "{bookmarks} ")}'
        b'{ifeq(branch, "default", "", "{branch} ")}'
        b'- {author|user}: {desc|firstline}'
    ),
)
coreconfigitem(
    b'ui', b'message-output', default=b'stdio',
)
coreconfigitem(
    b'ui', b'nontty', default=False,
)
coreconfigitem(
    b'ui', b'origbackuppath', default=None,
)
coreconfigitem(
    b'ui', b'paginate', default=True,
)
coreconfigitem(
    b'ui', b'patch', default=None,
)
coreconfigitem(
    b'ui', b'pre-merge-tool-output-template', default=None,
)
coreconfigitem(
    b'ui', b'portablefilenames', default=b'warn',
)
coreconfigitem(
    b'ui', b'promptecho', default=False,
)
coreconfigitem(
    b'ui', b'quiet', default=False,
)
coreconfigitem(
    b'ui', b'quietbookmarkmove', default=False,
)
coreconfigitem(
    b'ui', b'relative-paths', default=b'legacy',
)
coreconfigitem(
    b'ui', b'remotecmd', default=b'hg',
)
coreconfigitem(
    b'ui', b'report_untrusted', default=True,
)
coreconfigitem(
    b'ui', b'rollback', default=True,
)
coreconfigitem(
    b'ui', b'signal-safe-lock', default=True,
)
coreconfigitem(
    b'ui', b'slash', default=False,
)
coreconfigitem(
    b'ui', b'ssh', default=b'ssh',
)
coreconfigitem(
    b'ui', b'ssherrorhint', default=None,
)
coreconfigitem(
    b'ui', b'statuscopies', default=False,
)
coreconfigitem(
    b'ui', b'strict', default=False,
)
coreconfigitem(
    b'ui', b'style', default=b'',
)
coreconfigitem(
    b'ui', b'supportcontact', default=None,
)
coreconfigitem(
    b'ui', b'textwidth', default=78,
)
coreconfigitem(
    b'ui', b'timeout', default=b'600',
)
coreconfigitem(
    b'ui', b'timeout.warn', default=0,
)
coreconfigitem(
    b'ui', b'traceback', default=False,
)
coreconfigitem(
    b'ui', b'tweakdefaults', default=False,
)
coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
coreconfigitem(
    b'ui', b'verbose', default=False,
)
coreconfigitem(
    b'verify', b'skipflags', default=None,
)
coreconfigitem(
    b'web', b'allowbz2', default=False,
)
coreconfigitem(
    b'web', b'allowgz', default=False,
)
coreconfigitem(
    b'web', b'allow-pull', alias=[(b'web', b'allowpull')], default=True,
)
coreconfigitem(
    b'web', b'allow-push', alias=[(b'web', b'allow_push')], default=list,
)
coreconfigitem(
    b'web', b'allowzip', default=False,
)
coreconfigitem(
    b'web', b'archivesubrepos', default=False,
)
coreconfigitem(
    b'web', b'cache', default=True,
)
coreconfigitem(
    b'web', b'comparisoncontext', default=5,
)
coreconfigitem(
    b'web', b'contact', default=None,
)
coreconfigitem(
    b'web', b'deny_push', default=list,
)
coreconfigitem(
    b'web', b'guessmime', default=False,
)
coreconfigitem(
    b'web', b'hidden', default=False,
)
coreconfigitem(
    b'web', b'labels', default=list,
)
coreconfigitem(
    b'web', b'logoimg', default=b'hglogo.png',
)
coreconfigitem(
    b'web', b'logourl', default=b'https://mercurial-scm.org/',
)
coreconfigitem(
    b'web', b'accesslog', default=b'-',
)
coreconfigitem(
    b'web', b'address', default=b'',
)
coreconfigitem(
    b'web', b'allow-archive', alias=[(b'web', b'allow_archive')], default=list,
)
coreconfigitem(
    b'web', b'allow_read', default=list,
)
coreconfigitem(
    b'web', b'baseurl', default=None,
)
coreconfigitem(
    b'web', b'cacerts', default=None,
)
coreconfigitem(
    b'web', b'certificate', default=None,
)
coreconfigitem(
    b'web', b'collapse', default=False,
)
coreconfigitem(
    b'web', b'csp', default=None,
)
coreconfigitem(
    b'web', b'deny_read', default=list,
)
coreconfigitem(
    b'web', b'descend', default=True,
)
coreconfigitem(
    b'web', b'description', default=b"",
)
coreconfigitem(
    b'web', b'encoding', default=lambda: encoding.encoding,
)
coreconfigitem(
    b'web', b'errorlog', default=b'-',
)
coreconfigitem(
    b'web', b'ipv6', default=False,
)
coreconfigitem(
    b'web', b'maxchanges', default=10,
)
coreconfigitem(
    b'web', b'maxfiles', default=10,
)
coreconfigitem(
    b'web', b'maxshortchanges', default=60,
)
coreconfigitem(
    b'web', b'motd', default=b'',
)
coreconfigitem(
    b'web', b'name', default=dynamicdefault,
)
coreconfigitem(
    b'web', b'port', default=8000,
)
coreconfigitem(
    b'web', b'prefix', default=b'',
)
coreconfigitem(
    b'web', b'push_ssl', default=True,
)
coreconfigitem(
    b'web', b'refreshinterval', default=20,
)
coreconfigitem(
    b'web', b'server-header', default=None,
)
coreconfigitem(
    b'web', b'static', default=None,
)
coreconfigitem(
    b'web', b'staticurl', default=None,
)
coreconfigitem(
    b'web', b'stripes', default=1,
)
coreconfigitem(
    b'web', b'style', default=b'paper',
)
coreconfigitem(
    b'web', b'templates', default=None,
)
coreconfigitem(
    b'web', b'view', default=b'served', experimental=True,
)
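
# For illustration (not part of the original module): these [web] options
# drive "hg serve" and hgweb, e.g.:
#
#   [web]
#   port = 8000
#   style = paper
#   allow-archive = gz, zip
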
coreconfigitem(
    b'worker', b'backgroundclose', default=dynamicdefault,
)
# Windows defaults to a limit of 512 open files. A buffer of 128
# should give us enough headway.
coreconfigitem(
    b'worker', b'backgroundclosemaxqueue', default=384,
)
coreconfigitem(
    b'worker', b'backgroundcloseminfilecount', default=2048,
)
coreconfigitem(
    b'worker', b'backgroundclosethreadcount', default=4,
)
coreconfigitem(
    b'worker', b'enabled', default=True,
)
coreconfigitem(
    b'worker', b'numcpus', default=None,
)

# Rebase-related configuration was moved to core because other extensions do
# strange things with it. For example, shelve imports the rebase extension to
# reuse some bits without formally loading it.
coreconfigitem(
    b'commands', b'rebase.requiredest', default=False,
)
coreconfigitem(
    b'experimental', b'rebaseskipobsolete', default=True,
)
coreconfigitem(
    b'rebase', b'singletransaction', default=False,
)
coreconfigitem(
    b'rebase', b'experimental.inmemory', default=False,
)
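
As a usage sketch (not part of the changeset): consumers read these
declarations through the ``ui`` object, which supplies the declared default
when nothing is configured, e.g.:

    # assumes a ``repo`` object is already in hand
    if repo.ui.configbool(b'rebase', b'singletransaction'):
        pass  # wrap the whole rebase in a single transaction
    limit = repo.ui.configint(b'profiling', b'limit')  # 30 unless overridden
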
@@ -1,3744 +1,3747 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    procutil,
    stringutil,
)

from .revlogutils import constants as revlogconst

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()
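
# For illustration (not part of the original module): a (b'bookmarks',
# b'plain') entry refers to .hg/bookmarks, while (b'phaseroots', b'')
# refers to .hg/store/phaseroots.
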
class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True
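
# For illustration (not part of the original module), a caller can probe the
# cache without triggering a load, e.g.:
#
#   changelog, cached = isfilecached(repo, b'changelog')
#   if cached:
#       ...  # reuse the already-loaded changelog
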
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repos only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on an unfiltered version"""

    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper


moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True
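
# For illustration (a minimal sketch, not part of the original module): the
# executor is driven through the peer interface like so:
#
#   with peer.commandexecutor() as executor:
#       f = executor.callcommand(b'lookup', {b'key': b'tip'})
#       executor.sendcommands()
#       node = f.result()
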
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(b'clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, missingheads=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, missingheads=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = b'sparserevlog'

# A repository with the sidedataflag requirement allows storing extra
# information for revisions without altering their original hashes.
SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'

# A repository with the copies-sidedata-changeset requirement will store
# copies-related information in the changeset's sidedata.
COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
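
# For illustration (hypothetical extension code, not part of this module):
# an extension typically registers its hook from uisetup(), e.g.:
#
#   def featuresetup(ui, features):
#       features.add(b'exp-myfeature')  # hypothetical requirement name
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)
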
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()
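
    # For illustration (not part of the original module): a typical modern
    # repository's .hg/requires reads, one token per line:
    #
    #   dotencode
    #   fncache
    #   generaldelta
    #   revlogv1
    #   sparserevlog
    #   store
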
522 # The .hg/hgrc file may load extensions or contain config options
522 # The .hg/hgrc file may load extensions or contain config options
523 # that influence repository construction. Attempt to load it and
523 # that influence repository construction. Attempt to load it and
524 # process any new extensions that it may have pulled in.
524 # process any new extensions that it may have pulled in.
525 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
525 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
526 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
526 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
527 extensions.loadall(ui)
527 extensions.loadall(ui)
528 extensions.populateui(ui)
528 extensions.populateui(ui)
529
529
530 # Set of module names of extensions loaded for this repository.
530 # Set of module names of extensions loaded for this repository.
531 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
531 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
532
532
533 supportedrequirements = gathersupportedrequirements(ui)
533 supportedrequirements = gathersupportedrequirements(ui)
534
534
535 # We first validate the requirements are known.
535 # We first validate the requirements are known.
536 ensurerequirementsrecognized(requirements, supportedrequirements)
536 ensurerequirementsrecognized(requirements, supportedrequirements)
537
537
538 # Then we validate that the known set is reasonable to use together.
538 # Then we validate that the known set is reasonable to use together.
539 ensurerequirementscompatible(ui, requirements)
539 ensurerequirementscompatible(ui, requirements)
540
540
    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(
                _(b'.hg/sharedpath points to nonexistent directory %s')
                % sharedvfs.base
            )

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy.
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


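# A hedged, illustrative sketch (not part of Mercurial's API): how the
# ``type()`` call above composes the factory-produced base classes into a
# single repository type. All names below are hypothetical.
def _exampletypecomposition():
    class mainbehavior(object):
        def role(self):
            return b'main'

    class filestoragebehavior(object):
        def storage(self):
            return b'revlog'

    # Mirrors ``type(name, tuple(bases), {})``: the derived type inherits
    # from every base, and its name may contain characters that are not
    # legal in Python identifiers.
    bases = (mainbehavior, filestoragebehavior)
    cls = type('derivedrepo:/tmp/demo<generaldelta,revlogv1>', bases, {})
    inst = cls()
    return inst.role(), inst.storage()

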
def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False


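# A hedged sketch of the monkeypatching the docstring above invites, using
# ``extensions.wrapfunction`` from an extension module. The side-car config
# file name is hypothetical.
#
#   from mercurial import extensions, localrepo
#
#   def _loadhgrcwrapper(orig, ui, wdirvfs, hgvfs, requirements):
#       loaded = orig(ui, wdirvfs, hgvfs, requirements)
#       try:
#           # Also honor an extra, extension-specific config file.
#           ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#           loaded = True
#       except IOError:
#           pass
#       return loaded
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrcwrapper)

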
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


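# A hedged demonstration of the autoload behavior above, using a minimal
# stand-in that exposes only the two ui methods ``afterhgrcload`` touches.
# The stand-in is illustrative and is not Mercurial's real ui class.
def _exampleautoload():
    class fakeui(object):
        def __init__(self):
            self.cfg = {}

        def hasconfig(self, section, name):
            return (section, name) in self.cfg

        def setconfig(self, section, name, value, source=b''):
            self.cfg[(section, name)] = value

    ui = fakeui()
    # A repo carrying the ``lfs`` requirement gets the lfs extension enabled.
    afterhgrcload(ui, None, None, {b'lfs'})
    return (b'extensions', b'lfs') in ui.cfg

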
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported


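# A hedged sketch of how an extension registers a ``featuresetupfuncs``
# callback consumed above. The callback only runs when its defining module
# is a loaded extension, so registration belongs in the extension's
# ``uisetup``. The requirement name is hypothetical.
#
#   from mercurial import localrepo
#
#   def featuresetup(ui, supported):
#       supported.add(b'exp-myfeature')
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)

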
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


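# A hedged demonstration of the validation above: with an empty supported
# set, any well-formed requirement is reported as unknown.
def _exampleunknownrequirement():
    try:
        ensurerequirementsrecognized({b'fancyfeature'}, set())
    except error.RequirementError:
        return True
    return False

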
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(
                path, vfstype, b'dotencode' in requirements
            )

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


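# A hedged summary of the branching above as a tiny helper (labels mirror
# the store classes; for illustration only, not used by Mercurial).
def _examplestoreselection(requirements):
    if b'store' not in requirements:
        return b'basicstore'  # flat, legacy layout
    if b'fncache' in requirements:
        return b'fncachestore'  # modern layout; honors dotencode if present
    return b'encodedstore'  # encoded filenames, no fncache

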
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True

    return options


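# A hedged note on the user-facing side of the ``rust.index`` option
# resolved above. In an hgrc (for example a repository's .hg/hgrc):
#
#   [experimental]
#   rust.index = true
#
# Programmatically, the equivalent sketch would be:
#
#   ui.setconfig(b'experimental', b'rust.index', b'true')
#   opts = resolverevlogstorevfsoptions(ui, requirements, features)
#   assert opts.get(b'rust.index') is True

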
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0:1] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0:1] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]


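# A hedged sketch: because REPO_INTERFACES resolves its factories lazily
# (via the lambdas above), an extension can wrap the module-level functions
# and still take effect. The added feature flag is hypothetical.
#
#   from mercurial import extensions, localrepo
#
#   def wrapfilestorage(orig, requirements, features, **kwargs):
#       cls = orig(requirements, features, **kwargs)
#       features.add(b'exp-myfeature')
#       return cls
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'makefilestorage', wrapfilestorage)

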
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """
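
    # A hedged usage sketch of the guidance above (the path is illustrative):
    #
    #   from mercurial import hg, ui as uimod
    #   repo = hg.repository(uimod.ui.load(), b'/path/to/repo')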

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        b'treemanifest',
        COPIESSDC_REQUIREMENT,
        REVLOGV2_REQUIREMENT,
        SIDEDATA_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        b'shared',
        b'relshared',
        b'dotencode',
        b'exp-sparse',
        b'internal-phase',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            if path.startswith(b'journal.') or path.startswith(b'undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

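    # A hedged usage sketch: per the docstring above, filtering replaces the
    # current view rather than stacking views.
    #
    #   served = repo.filtered(b'served')      # hides secret changesets
    #   visible = served.filtered(b'visible')  # the "visible" view, not both
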
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because bookmarks pointing to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free,
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light": the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock-time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for
        #    bookmarks)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` is captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cachestat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depends on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

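    # Illustrative sketch of combining a caller-provided matcher with the
    # narrowspec via `narrowmatch` above (not part of the original code; the
    # pattern is hypothetical):
    #
    #     m = matchmod.match(repo.root, b'', [b'glob:src/**'])
    #     narrowed = repo.narrowmatch(m)  # intersection with the narrowspec
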
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @util.propertycache
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

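    # For illustration (not part of the original code): each key above takes
    # the fast path in __getitem__ below, so repo[b'null'], repo[nullrev] and
    # repo[nullid] all yield the null changectx without a changelog lookup.
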
    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

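    # Summary of the lookup forms accepted by __getitem__ above (illustrative
    # comment, not part of the original code):
    #
    #     repo[None]        -> workingctx
    #     repo[0]           -> changectx for revision 0
    #     repo[b'.']        -> changectx for the working directory parent
    #     repo[node]        -> changectx for a 20-byte binary node
    #     repo[b'<40-hex>'] -> changectx for a full hex node
    #     repo[0:5]         -> list of changectx, skipping filtered revisions
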
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

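    # A minimal usage sketch (not part of the original code), relying on the
    # %-formatting handled by ``revsetlang.formatspec``:
    #
    #     repo.revs(b'%d:: and not %d::', base, head)  # %d: an integer rev
    #     repo.revs(b'%ld and merge()', revs)          # %ld: a list of revs
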
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

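    # Illustrative usage sketch for `anyrevs` (not part of the original code;
    # the alias name is hypothetical):
    #
    #     repo.anyrevs(
    #         [b'mine()'], user=True, localalias={b'mine': b'heads(default)'}
    #     )
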
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, b'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = dict(
            [
                (encoding.tolocal(name), value)
                for (name, value) in pycompat.iteritems(tagtypes)
            ]
        )
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records: the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

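    # The filter patterns above come from the [encode] and [decode] hgrc
    # sections. An illustrative configuration (not part of the original code;
    # the commands are examples only), where a plain entry runs as a shell
    # command filter:
    #
    #     [encode]
    #     *.txt = dos2unix
    #     [decode]
    #     *.txt = unix2dos
    #
    # A b'!' value disables filtering for a pattern, and prefixes registered
    # via `adddatafilter` below select in-process filters instead.
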
    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

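    # For reference (illustrative comment, not part of the original code):
    # the ``flags`` argument of wwrite uses the manifest flag convention, e.g.
    #
    #     repo.wwrite(b'script.sh', data, b'x')  # executable file
    #     repo.wwrite(b'link', b'target', b'l')  # symbolic link
    #     repo.wwrite(b'plain.txt', data, b'')   # regular file
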
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag, since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new, changed, or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        # <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        # "-R": tag is removed,
        # "+A": tag is added,
        # "-M": tag is moved (old value),
        # "+M": tag is moved (new value),
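        #
        # For example (node values below are illustrative only), moving tag
        # "v1.0" would be recorded as two lines::
        #
        #   -M 5c095ad7e90f871700f02dd1fa5012cb4498a2d4 v1.0
        #   +M 9150fe93bec603cd88d05cda9f6ff13420cb53e9 v1.0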
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here. As we do it only once,
                # building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks, and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes[b'phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook(
                        b'pretxnclose-phase',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

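        # For illustration (not part of the original code), the pretxnclose
        # hooks invoked above are configured like any other hook; the script
        # name here is hypothetical:
        #
        #     [hooks]
        #     pretxnclose-bookmark.check = ./check-bookmark-move.sh
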
        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when closing the transaction if
                # tr.addfilegenerator (via dirstate.write or so) isn't invoked
                # while the transaction is running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

2172 tr = transaction.transaction(
2175 tr = transaction.transaction(
2173 rp,
2176 rp,
2174 self.svfs,
2177 self.svfs,
2175 vfsmap,
2178 vfsmap,
2176 b"journal",
2179 b"journal",
2177 b"undo",
2180 b"undo",
2178 aftertrans(renames),
2181 aftertrans(renames),
2179 self.store.createmode,
2182 self.store.createmode,
2180 validator=validate,
2183 validator=validate,
2181 releasefn=releasefn,
2184 releasefn=releasefn,
2182 checkambigfiles=_cachedfiles,
2185 checkambigfiles=_cachedfiles,
2183 name=desc,
2186 name=desc,
2184 )
2187 )
2185 tr.changes[b'origrepolen'] = len(self)
2188 tr.changes[b'origrepolen'] = len(self)
2186 tr.changes[b'obsmarkers'] = set()
2189 tr.changes[b'obsmarkers'] = set()
2187 tr.changes[b'phases'] = {}
2190 tr.changes[b'phases'] = {}
2188 tr.changes[b'bookmarks'] = {}
2191 tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """Run if the transaction is successful; schedules the hook runs."""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes[b'phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'txnclose-phase',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)
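
        # Editor's note: an illustrative sketch, not part of this change. The
        # txnclose-* hooks fired above are configured from hgrc; for example,
        # a hypothetical Python hook reporting phase movements could be wired
        # up as:
        #
        #   [hooks]
        #   txnclose-phase.report = python:myhooks.reportphase
        #
        # with a hypothetical myhooks.py along these lines (argument names
        # assume the keys produced by phases.preparehookargs() above):
        #
        #   def reportphase(ui, repo, hooktype, node=None, oldphase=None,
        #                   phase=None, **kwargs):
        #       ui.status(b'%s: %s -> %s\n' % (node, oldphase, phase))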

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it run before the transaction summary
        # reports registered via scmutil.registersummarycallback(), whose
        # names are 00-txnreport etc. That way, the caches will be warm when
        # the callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """Run if the transaction is aborted."""
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
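    # Editor's note: an illustrative sketch, not part of this change. The
    # leading "-" in b'-warm-cache' above works because the transaction runs
    # its postclose callbacks in sorted category-name order, e.g. (with
    # hypothetical names and callables):
    #
    #   tr.addpostclose(b'-first', warmcaches)    # before '00-txnreport'
    #   tr.addpostclose(b'00-txnreport', report)  # summary callbacks
    #   tr.addpostclose(b'zz-last', cleanup)      # runs last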

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
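    # Editor's note: an illustrative sketch, not part of this change.
    # undoname() (defined elsewhere in this module) maps each journal file to
    # its undo counterpart, so undofiles() yields pairs such as:
    #
    #   (self.svfs, b'journal')          -> (self.svfs, b'undo')
    #   (self.vfs, b'journal.dirstate')  -> (self.vfs, b'undo.dirstate')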

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
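    # Editor's note: an illustrative sketch, not part of this change. The
    # journal.desc file written above holds two lines, the pre-transaction
    # repo length and the transaction description; _rollback() below parses
    # the renamed undo.desc as the mirror image of this:
    #
    #   data = b"%d\n%s\n" % (11, b'commit')       # -> b'11\ncommit\n'
    #   oldlen, desc = data.splitlines()[:2]       # -> (b'11', b'commit')
    #   oldtip = int(oldlen) - 1                   # -> 10, the old tip rev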

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during a transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater
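    # Editor's note: an illustrative sketch, not part of this change. Since
    # the docstring above invites extensions to augment this logic, a
    # hypothetical extension could wrap the method like so:
    #
    #   from mercurial import extensions, localrepo
    #
    #   def _mycacheupdater(orig, repo, newtransaction):
    #       updater = orig(repo, newtransaction)
    #       def wrapped(tr):
    #           updater(tr)
    #           repo.ui.debug(b'my-ext: caches warmed\n')
    #       return wrapped
    #
    #   def uisetup(ui):
    #       extensions.wrapfunction(
    #           localrepo.localrepository, '_buildcacheupdater', _mycacheupdater
    #       )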

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available as the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest() # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)
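    # Editor's note: an illustrative sketch, not part of this change. Outside
    # of a transaction this method backs explicit cache warming; with an open
    # repo object, a full warm-up is simply:
    #
    #   repo.updatecaches(full=True)  # branchmaps, revbranchcache, tags, ...
    #
    # which is essentially what the `hg debugupdatecaches` command does.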

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback(True)
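    # Editor's note: an illustrative sketch, not part of this change. A caller
    # that holds a lock can use this to defer work until every lock is gone,
    # as commit() below does with its commithook. A minimal hypothetical use:
    #
    #   def notify(success):
    #       repo.ui.status(b'locks released (success=%r)\n' % success)
    #
    #   with repo.wlock(), repo.lock():
    #       repo._afterlock(notify)  # queued on the outermost held lock
    #       ...                      # locked work happens here
    #   # notify(True) runs here, once the outer lock is fully released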

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisitions would not cause a dead-lock; they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l
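    # Editor's note: an illustrative sketch, not part of this change. Per the
    # docstrings above, when both locks are needed wlock comes first (the
    # devel warning above fires on the opposite order):
    #
    #   with repo.wlock(), repo.lock():  # correct: wlock, then store lock
    #       ...                          # mutate working copy and store
    #
    # Taking lock() first and then wlock() risks deadlock against another
    # process acquiring them in the documented order.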

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(
        self,
        fctx,
        manifest1,
        manifest2,
        linkrev,
        tr,
        changelist,
        includecopymeta,
    ):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug(b'reusing %s filelog entry\n' % fname)
                if (
                    fparent1 != nullid
                    and manifest1.flags(fname) != fctx.flags()
                ) or (
                    fparent2 != nullid
                    and manifest2.flags(fname) != fctx.flags()
                ):
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or cnode is None: # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (e.g., issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if cnode:
                self.ui.debug(
                    b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
                )
                if includecopymeta:
                    meta[b"copy"] = cfname
                    meta[b"copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(
                    _(
                        b"warning: can't find ancestor for '%s' "
                        b"copied from '%s'!\n"
                    )
                    % (fname, cfname)
                )

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
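    # Editor's note: an illustrative sketch, not part of this change. When
    # copy metadata is recorded, the new filelog revision's first parent is
    # nullid and the copy source rides in the metadata dict built above:
    #
    #   meta = {b'copy': b'foo', b'copyrev': hex(cnode)}  # hypothetical values
    #   flog.add(text, meta, tr, linkrev, nullid, newfparent)
    #
    # Readers then follow b'copy'/b'copyrev' instead of the null parent to
    # find the rename source ('foo' here).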

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=False,
        extra=None,
    ):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                ) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            # internal config: ui.allowemptycommit
            allowemptycommit = (
                wctx.branch() != wctx.p1().branch()
                or extra.get(b'close')
                or merge
                or cctx.files()
                or self.ui.configbool(b'ui', b'allowemptycommit')
            )
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except: # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                raise

            def commithook(unused_success):
                # hack for commands that use a temporary commit (e.g. histedit):
                # the temporary commit may have been stripped before the hook
                # is released
                if self.changelog.hasnode(ret):
                    self.hook(
                        b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                    )

            self._afterlock(commithook)
            return ret
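    # Editor's note: an illustrative sketch, not part of this change. A
    # minimal programmatic commit through this API (assuming `repo` is an
    # open localrepository with pending working-directory changes):
    #
    #   node = repo.commit(
    #       text=b'example commit message',
    #       user=b'Jane Doe <jane@example.org>',
    #   )
    #   # `node` is the new changeset's binary node id, or None if there was
    #   # nothing to commit (see the allowemptycommit logic above).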

    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().

        origctx is for convert to work around the problem that bug
        fixes to the files list in changesets change hashes. For
        convert to be the identity, it can pass an origctx and this
        function will use the same files list when it makes sense to
        do so.
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        if self.filecopiesmode == b'changeset-sidedata':
            writechangesetcopy = True
            writefilecopymeta = True
            writecopiesto = None
        else:
            writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
            writefilecopymeta = writecopiesto != b'changeset-only'
            writechangesetcopy = writecopiesto in (
                b'changeset-only',
                b'compatibility',
            )
        p1copies, p2copies = None, None
        if writechangesetcopy:
            p1copies = ctx.p1copies()
            p2copies = ctx.p2copies()
        filesadded, filesremoved = None, None
        with self.lock(), self.transaction(b"commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug(b'reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
                if writechangesetcopy:
                    filesadded = ctx.filesadded()
                    filesremoved = ctx.filesremoved()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_(b"committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + b"\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(
                                fctx,
                                m1,
                                m2,
                                linkrev,
                                trp,
                                changed,
                                writefilecopymeta,
                            )
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(
                            _(b"trouble committing %s!\n") % uipathfn(f)
                        )
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(
                                _(b"trouble committing %s!\n") % uipathfn(f)
                            )
                        raise

                # update manifest
                removed = [f for f in removed if f in m1 or f in m2]
                drop = sorted([f for f in removed if f in m])
                for f in drop:
                    del m[f]
                if p2.rev() != nullrev:

                    @util.cachefunc
                    def mas():
                        p1n = p1.node()
                        p2n = p2.node()
                        cahs = self.changelog.commonancestorsheads(p1n, p2n)
                        if not cahs:
                            cahs = [nullrev]
                        return [self[r].manifest() for r in cahs]

                    def deletionfromparent(f):
                        # When a file is removed relative to p1 in a merge, this
                        # function determines whether the absence is due to a
                        # deletion from a parent, or whether the merge commit
                        # itself deletes the file. We decide this by doing a
                        # simplified three way merge of the manifest entry for
                        # the file. There are two ways we decide the merge
                        # itself didn't delete a file:
                        # - neither parent (nor the merge) contains the file
                        # - exactly one parent contains the file, and that
                        #   parent has the same filelog entry as the merge
                        #   ancestor (or all of them if there are two). In other
                        #   words, that parent left the file unchanged while the
                        #   other one deleted it.
                        # One way to think about this is that deleting a file is
                        # similar to emptying it, so the list of changed files
                        # should be similar either way. The computation
                        # described above is not done directly in _filecommit
                        # when creating the list of changed files, however
                        # it does something very similar by comparing filelog
                        # nodes.
                        if f in m1:
                            return f not in m2 and all(
                                f in ma and ma.find(f) == m1.find(f)
                                for ma in mas()
                            )
                        elif f in m2:
                            return all(
                                f in ma and ma.find(f) == m2.find(f)
                                for ma in mas()
                            )
                        else:
                            return True
3110
3113
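                # Concrete case: if f is present in m1 with the same
                # filelog node as in every merge ancestor, but absent
                # from m2, then p2 deleted it and this merge merely
                # carries the deletion over, so f is filtered out of
                # ``removed`` just below.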
                removed = [f for f in removed if not deletionfromparent(f)]

            files = changed + removed
            md = None
            if not files:
                # if no "files" actually changed in terms of the changelog,
                # try hard to detect unmodified manifest entry so that the
                # exact same commit can be reproduced later on convert.
                md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
            if not files and md:
                self.ui.debug(
                    b'not reusing manifest (no file change in '
                    b'changelog, but manifest differs)\n'
                )
            if files or md:
                self.ui.note(_(b"committing manifest\n"))
                # we're using narrowmatch here since it's already applied at
                # other stages (such as dirstate.walk), so we're already
                # ignoring things outside of narrowspec in most cases. The
                # one case where we might have files outside the narrowspec
                # at this point is merges, and we already error out in the
                # case where the merge has files outside of the narrowspec,
                # so this is safe.
                mn = mctx.write(
                    trp,
                    linkrev,
                    p1.manifestnode(),
                    p2.manifestnode(),
                    added,
                    drop,
                    match=self.narrowmatch(),
                )

                if writechangesetcopy:
                    filesadded = [
                        f for f in changed if not (f in m1 or f in m2)
                    ]
                    filesremoved = removed
            else:
                self.ui.debug(
                    b'reusing manifest from p1 (listed files '
                    b'actually unchanged)\n'
                )
                mn = p1.manifestnode()
        else:
            self.ui.debug(b'reusing manifest from p1 (no file change)\n')
            mn = p1.manifestnode()
            files = []

        if writecopiesto == b'changeset-only':
            # If writing only to changeset extras, use None to indicate that
            # no entry should be written. If writing to both, write an empty
            # entry to prevent the reader from falling back to reading
            # filelogs.
            p1copies = p1copies or None
            p2copies = p2copies or None
            filesadded = filesadded or None
            filesremoved = filesremoved or None

        if origctx and origctx.manifestnode() == mn:
            files = origctx.files()

        # update changelog
        self.ui.note(_(b"committing changelog\n"))
        self.changelog.delayupdate(tr)
        n = self.changelog.add(
            mn,
            files,
            ctx.description(),
            trp,
            p1.node(),
            p2.node(),
            user,
            ctx.date(),
            ctx.extra().copy(),
            p1copies,
            p2copies,
            filesadded,
            filesremoved,
        )
        xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
        self.hook(
            b'pretxncommit',
            throw=True,
            node=hex(n),
            parent1=xp1,
            parent2=xp2,
        )
        # set the new commit in its proper phase
        targetphase = subrepoutil.newcommitphase(self.ui, ctx)
        if targetphase:
            # retracting the phase boundary does not alter parent
            # changesets: if a parent has a higher phase, the resulting
            # phase will be compliant anyway
            #
            # if the minimal phase was 0 we don't need to retract anything
            phases.registernew(self, tr, targetphase, [n])
        return n

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

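    # Added worked example: ``between`` samples the first-parent chain
    # at exponentially growing distances. The check ``i == f`` (with f
    # doubling after every hit) records the nodes 1, 2, 4, 8, ... steps
    # below ``top``. For a linear chain 10 -> 0 with top=10 and
    # bottom=0, the returned list holds the nodes of revisions 9, 8, 6
    # and 2. Historically, the legacy wire protocol's ``between``
    # command relied on this sampling for ancestor discovery.
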
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance; its hooks are called with a pushop
        (carrying repo, remote and outgoing) before changesets are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a
            # no-op, leaving both src and dest on disk. delete dest to
            # make sure the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


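# Illustrative sketch (added; ``_demo_aftertrans`` is hypothetical and
# not part of this module): the callback returned by ``aftertrans``
# unlinks ``dest`` first because, when src and dest are names for the
# same file, ``vfs.rename`` would be a no-op that leaves both on disk.
def _demo_aftertrans(vfs):
    vfs.write(b'journal.demo', b'data')
    callback = aftertrans([(vfs, b'journal.demo', b'undo.demo')])
    callback()  # renames journal.demo -> undo.demo
    assert vfs.exists(b'undo.demo') and not vfs.exists(b'journal.demo')

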
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


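# Minimal sketch (added; ``_demo_undoname`` is illustrative only): the
# journal -> undo mapping rewrites the first occurrence of b'journal'
# in the basename and leaves the directory part untouched.
def _demo_undoname():
    assert undoname(b'journal') == b'undo'
    expected = os.path.join(b'store', b'undo.phaseroots')
    assert undoname(os.path.join(b'store', b'journal.phaseroots')) == expected

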
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(b'relshared')
        else:
            requirements.add(b'shared')

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengine = ui.config(b'format', b'revlog-compression')
    if compengine not in util.compengines:
        raise error.Abort(
            _(
                b'compression engine %s defined by '
                b'format.revlog-compression not available'
            )
            % compengine,
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    elif compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(SIDEDATA_REQUIREMENT)
        requirements.add(COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(b'treemanifest')

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(b'internal-phase')

    if createopts.get(b'narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    return requirements


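# Rough usage sketch (added; assumes stock defaults, so the exact set
# may differ by version and configuration): on a default setup the
# result is typically {b'revlogv1', b'store', b'fncache', b'dotencode',
# b'generaldelta', b'sparserevlog'}.
def _demo_newreporequirements():
    from mercurial import ui as uimod  # demo-only absolute import

    ui = uimod.ui.load()
    return newreporequirements(ui, createopts=defaultcreateopts(ui))

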
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


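# Hedged sketch of the wrapping hook point described above (the
# b'myfeature' option and helper name are illustrative): an extension
# that understands an extra creation option drops it from the unknown
# set, typically via extensions.wrapfunction(localrepo,
# 'filterknowncreateopts', _filtermyfeature) in its uisetup().
def _filtermyfeature(orig, ui, createopts):
    unknown = orig(ui, createopts)
    unknown.pop(b'myfeature', None)  # this option is handled
    return unknown

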
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )

    scmutil.writerequires(hgvfs, requirements)

    # Write out a file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


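# Minimal usage sketch (added; the paths are hypothetical): create a
# plain repository, then one carrying the b'lfs' requirement.
def _demo_createrepository(ui):
    createrepository(ui, b'/tmp/repo-plain')
    createrepository(ui, b'/tmp/repo-lfs', createopts={b'lfs': True})

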
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
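
# Behavior after poisoning (added note): every attribute access except
# ``close`` now raises, while ``close`` stays callable so lingering
# references can still shut down cleanly, e.g.:
#
#   poisonrepository(repo)
#   repo.close()      # still allowed, now a no-op
#   repo.changelog    # raises error.ProgrammingError
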
@@ -1,2979 +1,2988 @@
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

from __future__ import absolute_import

import collections
import contextlib
import errno
import io
import os
import struct
import zlib

# import stuff from node for others to import from revlog
from .node import (
    bin,
    hex,
    nullhex,
    nullid,
    nullrev,
    short,
    wdirfilenodeids,
    wdirhex,
    wdirid,
    wdirrev,
)
from .i18n import _
from .pycompat import getattr
from .revlogutils.constants import (
    FLAG_GENERALDELTA,
    FLAG_INLINE_DATA,
    REVLOGV0,
    REVLOGV1,
    REVLOGV1_FLAGS,
    REVLOGV2,
    REVLOGV2_FLAGS,
    REVLOG_DEFAULT_FLAGS,
    REVLOG_DEFAULT_FORMAT,
    REVLOG_DEFAULT_VERSION,
)
from .revlogutils.flagutil import (
    REVIDX_DEFAULT_FLAGS,
    REVIDX_ELLIPSIS,
    REVIDX_EXTSTORED,
    REVIDX_FLAGS_ORDER,
    REVIDX_ISCENSORED,
    REVIDX_RAWTEXT_CHANGING_FLAGS,
    REVIDX_SIDEDATA,
)
from .thirdparty import attr
from . import (
    ancestor,
    dagop,
    error,
    mdiff,
    policy,
    pycompat,
    revlogutils,
    templatefilters,
    util,
)
from .interfaces import (
    repository,
    util as interfaceutil,
)
from .revlogutils import (
    deltas as deltautil,
    flagutil,
    sidedata as sidedatautil,
)
from .utils import (
    storageutil,
    stringutil,
)

# blanked usage of all the names to prevent pyflakes complaints
# We need these names available in the module for extensions.
REVLOGV0
REVLOGV1
REVLOGV2
FLAG_INLINE_DATA
FLAG_GENERALDELTA
REVLOG_DEFAULT_FLAGS
REVLOG_DEFAULT_FORMAT
REVLOG_DEFAULT_VERSION
REVLOGV1_FLAGS
REVLOGV2_FLAGS
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
REVIDX_SIDEDATA
REVIDX_EXTSTORED
REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
REVIDX_RAWTEXT_CHANGING_FLAGS

parsers = policy.importmod('parsers')
rustancestor = policy.importrust('ancestor')
rustdagop = policy.importrust('dagop')
rustrevlog = policy.importrust('revlog')

# Aliased for performance.
_zlibdecompress = zlib.decompress

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

# Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
    return text, False, {}


def ellipsiswriteprocessor(rl, text, sidedata):
    return text, False


def ellipsisrawprocessor(rl, text):
    return False


ellipsisprocessor = (
    ellipsisreadprocessor,
    ellipsiswriteprocessor,
    ellipsisrawprocessor,
)


def getoffset(q):
    return int(q >> 16)


def gettype(q):
    return int(q & 0xFFFF)


def offset_type(offset, type):
    if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
        raise ValueError(b'unknown revlog index flags')
    return int(int(offset) << 16 | type)


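# Worked example (added): a v1 index entry keeps the file offset in the
# upper bits and the 16 flag bits in the lower 16 bits of one integer;
# the round trip below recovers both halves.
def _demo_offset_type():
    packed = offset_type(1024, REVIDX_ISCENSORED)
    assert getoffset(packed) == 1024
    assert gettype(packed) == REVIDX_ISCENSORED

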
def _verify_revision(rl, skipflags, state, node):
    """Verify the integrity of the given revlog ``node`` while providing a hook
    point for extensions to influence the operation."""
    if skipflags:
        state[b'skipread'].add(node)
    else:
        # Side-effect: read content and verify hash.
        rl.revision(node)


@attr.s(slots=True, frozen=True)
class _revisioninfo(object):
    """Information about a revision that allows building its fulltext
    node: expected hash of the revision
    p1, p2: parent revs of the revision
    btext: built text cache consisting of a one-element list
    cachedelta: (baserev, uncompressed_delta) or None
    flags: flags associated with the revision storage

    One of btext[0] or cachedelta must be set.
    """

    node = attr.ib()
    p1 = attr.ib()
    p2 = attr.ib()
    btext = attr.ib()
    textlen = attr.ib()
    cachedelta = attr.ib()
    flags = attr.ib()


@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class revlogrevisiondelta(object):
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    linknode = attr.ib(default=None)


@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class revlogproblem(object):
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)


# index v0:
#  4 bytes: offset
#  4 bytes: compressed length
#  4 bytes: base rev
#  4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
indexformatv0 = struct.Struct(b">4l20s20s20s")
indexformatv0_pack = indexformatv0.pack
indexformatv0_unpack = indexformatv0.unpack


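# Added sanity note: b">4l20s20s20s" is four 4-byte big-endian integers
# followed by three 20-byte nodes, i.e. a fixed 76-byte entry.
assert indexformatv0.size == 4 * 4 + 3 * 20

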
217 class revlogoldindex(list):
218 class revlogoldindex(list):
218 @property
219 @property
219 def nodemap(self):
220 def nodemap(self):
220 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
221 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
221 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
222 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
222 return self._nodemap
223 return self._nodemap
223
224
224 @util.propertycache
225 @util.propertycache
225 def _nodemap(self):
226 def _nodemap(self):
226 nodemap = revlogutils.NodeMap({nullid: nullrev})
227 nodemap = revlogutils.NodeMap({nullid: nullrev})
227 for r in range(0, len(self)):
228 for r in range(0, len(self)):
228 n = self[r][7]
229 n = self[r][7]
229 nodemap[n] = r
230 nodemap[n] = r
230 return nodemap
231 return nodemap
231
232
232 def has_node(self, node):
233 def has_node(self, node):
233 """return True if the node exist in the index"""
234 """return True if the node exist in the index"""
234 return node in self._nodemap
235 return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def append(self, tup):
        self._nodemap[tup[7]] = len(self)
        super(revlogoldindex, self).append(tup)

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        for r in pycompat.xrange(i.start, len(self)):
            del self._nodemap[self[r][7]]
        super(revlogoldindex, self).__delitem__(i)

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __getitem__(self, i):
        if i == -1:
            return (0, 0, 0, -1, -1, -1, -1, nullid)
        return list.__getitem__(self, i)

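# --- illustrative sketch (editor's addition, not part of the module) ---
# The lookup contract shared by all index flavours: has_node() tests
# membership, rev() raises RevlogError for unknown nodes, get_rev()
# returns None instead. The 20-byte node values below are made up.
_example_index = revlogoldindex()
_example_index.append((0, 11, 23, 0, 0, -1, -1, b"\x11" * 20))
assert _example_index.has_node(b"\x11" * 20)
assert _example_index.rev(b"\x11" * 20) == 0
assert _example_index.get_rev(b"\x22" * 20) is None
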
class revlogoldio(object):
    def __init__(self):
        self.size = indexformatv0.size

    def parseindex(self, data, inline):
        s = self.size
        index = []
        nodemap = revlogutils.NodeMap({nullid: nullrev})
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off : off + s]
            off += s
            e = indexformatv0_unpack(cur)
            # transform to revlogv1 format
            e2 = (
                offset_type(e[0], 0),
                e[1],
                -1,
                e[2],
                e[3],
                nodemap.get(e[4], nullrev),
                nodemap.get(e[5], nullrev),
                e[6],
            )
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        index = revlogoldindex(index)
        return index, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise error.RevlogError(
                _(b'index entry flags need revlog version 1')
            )
        e2 = (
            getoffset(entry[0]),
            entry[1],
            entry[3],
            entry[4],
            node(entry[5]),
            node(entry[6]),
            entry[7],
        )
        return indexformatv0_pack(*e2)


# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = struct.Struct(b">Qiiiiii20s12x")
indexformatng_pack = indexformatng.pack
versionformat = struct.Struct(b">I")
versionformat_pack = versionformat.pack
versionformat_unpack = versionformat.unpack

# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7FFFFFFF

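# --- illustrative sketch (editor's addition, not part of the module) ---
# The 8-byte first field of a v1 entry multiplexes the data offset and
# the flag bits: offset in the high 48 bits, flags in the low 16 bits
# (the offset_type/getoffset/gettype helpers used above implement the
# split).
_example_field = offset_type(1024, 0)
assert getoffset(_example_field) == 1024
assert gettype(_example_field) == 0
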
class revlogio(object):
    def __init__(self):
        self.size = indexformatng.size

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, cache

    def packentry(self, entry, node, version, rev):
        p = indexformatng_pack(*entry)
        if rev == 0:
            p = versionformat_pack(version) + p[4:]
        return p

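# --- illustrative sketch (editor's addition, not part of the module) ---
# Stamping the version into entry 0 works because rev 0 always starts at
# data offset 0: the high bytes of its packed first field are zero, so
# they can carry the header word instead. 0x00010001 below is
# REVLOGV1 | FLAG_INLINE_DATA.
_example_p = indexformatng_pack(0, 11, 23, 0, 0, -1, -1, b"\x11" * 20)
_example_p = versionformat_pack(0x00010001) + _example_p[4:]
assert versionformat_unpack(_example_p[:4])[0] == 0x00010001
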
class rustrevlogio(revlogio):
    def parseindex(self, data, inline):
        index, cache = super(rustrevlogio, self).parseindex(data, inline)
        return rustrevlog.MixedIndex(index), cache

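# --- editor's note (not part of the module) ---
# rustrevlogio is what the new experimental.rust.index option selects in
# _loadindex() below. Assuming the option is threaded through to the
# opener options like the other experimental revlog settings, enabling
# the wrapper would look like:
#
#     [experimental]
#     rust.index = true
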
class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.

    If checkambig, indexfile is opened with checkambig=True at
    writing, to avoid file stat ambiguity.

    If mmaplargeindex is True, and an mmapindexthreshold is set, the
    index will be mmapped rather than read if it is larger than the
    configured threshold.

    If censorable is True, the revlog can have censored revisions.

    If `upperboundcomp` is not None, this is the expected maximal gain from
    compression for the data content.
    """

    _flagserrorclass = error.RevlogError

    def __init__(
        self,
        opener,
        indexfile,
        datafile=None,
        checkambig=False,
        mmaplargeindex=False,
        censorable=False,
        upperboundcomp=None,
    ):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.

        """
        self.upperboundcomp = upperboundcomp
        self.indexfile = indexfile
        self.datafile = datafile or (indexfile[:-2] + b".d")
        self.opener = opener
        # When True, indexfile is opened with checkambig=True at writing, to
        # avoid file stat ambiguity.
        self._checkambig = checkambig
        self._mmaplargeindex = mmaplargeindex
        self._censorable = censorable
        # 3-tuple of (node, rev, text) for a raw revision.
        self._revisioncache = None
        # Maps rev to chain base rev.
        self._chainbasecache = util.lrucachedict(100)
        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
        self._chunkcache = (0, b'')
        # How much data to read and cache into the raw revlog data cache.
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self._deltabothparents = True
        self.index = None
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}
        # Mapping of revision integer to full node.
        self._compengine = b'zlib'
        self._compengineopts = {}
        self._maxdeltachainspan = -1
        self._withsparseread = False
        self._sparserevlog = False
        self._srdensitythreshold = 0.50
        self._srmingapsize = 262144

        # Make copy of flag processors so each revlog instance can support
        # custom flags.
        self._flagprocessors = dict(flagutil.flagprocessors)

        # 2-tuple of file handles being used for active writing.
        self._writinghandles = None

        self._loadindex()

    def _loadindex(self):
        mmapindexthreshold = None
        opts = self.opener.options

        if b'revlogv2' in opts:
            newversionflags = REVLOGV2 | FLAG_INLINE_DATA
        elif b'revlogv1' in opts:
            newversionflags = REVLOGV1 | FLAG_INLINE_DATA
            if b'generaldelta' in opts:
                newversionflags |= FLAG_GENERALDELTA
        elif b'revlogv0' in self.opener.options:
            newversionflags = REVLOGV0
        else:
            newversionflags = REVLOG_DEFAULT_VERSION

        if b'chunkcachesize' in opts:
            self._chunkcachesize = opts[b'chunkcachesize']
        if b'maxchainlen' in opts:
            self._maxchainlen = opts[b'maxchainlen']
        if b'deltabothparents' in opts:
            self._deltabothparents = opts[b'deltabothparents']
        self._lazydelta = bool(opts.get(b'lazydelta', True))
        self._lazydeltabase = False
        if self._lazydelta:
            self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
        if b'compengine' in opts:
            self._compengine = opts[b'compengine']
        if b'zlib.level' in opts:
            self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
        if b'zstd.level' in opts:
            self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
        if b'maxdeltachainspan' in opts:
            self._maxdeltachainspan = opts[b'maxdeltachainspan']
        if self._mmaplargeindex and b'mmapindexthreshold' in opts:
            mmapindexthreshold = opts[b'mmapindexthreshold']
        self.hassidedata = bool(opts.get(b'side-data', False))
        if self.hassidedata:
            self._flagprocessors[REVIDX_SIDEDATA] = sidedatautil.processors
        self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
        withsparseread = bool(opts.get(b'with-sparse-read', False))
        # sparse-revlog forces sparse-read
        self._withsparseread = self._sparserevlog or withsparseread
        if b'sparse-read-density-threshold' in opts:
            self._srdensitythreshold = opts[b'sparse-read-density-threshold']
        if b'sparse-read-min-gap-size' in opts:
            self._srmingapsize = opts[b'sparse-read-min-gap-size']
        if opts.get(b'enableellipsis'):
            self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor

        # revlog v0 doesn't have flag processors
        for flag, processor in pycompat.iteritems(
            opts.get(b'flagprocessors', {})
        ):
            flagutil.insertflagprocessor(flag, processor, self._flagprocessors)

        if self._chunkcachesize <= 0:
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not greater than 0')
                % self._chunkcachesize
            )
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not a power of 2')
                % self._chunkcachesize
            )

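        # --- illustrative sketch (editor's addition, not part of the module) ---
        # The test above relies on the "n & (n - 1)" bit trick: clearing
        # the lowest set bit leaves zero only when exactly one bit is set.
        assert 65536 & (65536 - 1) == 0  # power of two: accepted
        assert 65000 & (65000 - 1) != 0  # not a power of two: rejected
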
        indexdata = b''
        self._initempty = True
        try:
            with self._indexfp() as f:
                if (
                    mmapindexthreshold is not None
                    and self.opener.fstat(f).st_size >= mmapindexthreshold
                ):
                    # TODO: should .close() to release resources without
                    # relying on Python GC
                    indexdata = util.buffer(util.mmapread(f))
                else:
                    indexdata = f.read()
            if len(indexdata) > 0:
                versionflags = versionformat_unpack(indexdata[:4])[0]
                self._initempty = False
            else:
                versionflags = newversionflags
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

            versionflags = newversionflags

        self.version = versionflags

        flags = versionflags & ~0xFFFF
        fmt = versionflags & 0xFFFF

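        # --- illustrative sketch (editor's addition, not part of the module) ---
        # The 32-bit header word splits into a low 16-bit format and high
        # 16-bit feature flags; e.g. for a v1 inline + generaldelta revlog
        # (REVLOGV1 = 1, FLAG_INLINE_DATA = 1 << 16, FLAG_GENERALDELTA = 2 << 16):
        assert 0x00030001 & 0xFFFF == REVLOGV1
        assert 0x00030001 & ~0xFFFF == FLAG_INLINE_DATA | FLAG_GENERALDELTA
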
        if fmt == REVLOGV0:
            if flags:
                raise error.RevlogError(
                    _(b'unknown flags (%#04x) in version %d revlog %s')
                    % (flags >> 16, fmt, self.indexfile)
                )

            self._inline = False
            self._generaldelta = False

        elif fmt == REVLOGV1:
            if flags & ~REVLOGV1_FLAGS:
                raise error.RevlogError(
                    _(b'unknown flags (%#04x) in version %d revlog %s')
                    % (flags >> 16, fmt, self.indexfile)
                )

            self._inline = versionflags & FLAG_INLINE_DATA
            self._generaldelta = versionflags & FLAG_GENERALDELTA

        elif fmt == REVLOGV2:
            if flags & ~REVLOGV2_FLAGS:
                raise error.RevlogError(
                    _(b'unknown flags (%#04x) in version %d revlog %s')
                    % (flags >> 16, fmt, self.indexfile)
                )

            self._inline = versionflags & FLAG_INLINE_DATA
            # generaldelta implied by version 2 revlogs.
            self._generaldelta = True

        else:
            raise error.RevlogError(
                _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
            )
        # sparse-revlog can't be on without general-delta (issue6056)
        if not self._generaldelta:
            self._sparserevlog = False

        self._storedeltachains = True

        self._io = revlogio()
        if rustrevlog is not None and self.opener.options.get('rust.index'):
            self._io = rustrevlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(indexdata, self._inline)
        except (ValueError, IndexError):
            raise error.RevlogError(
                _(b"index %s is corrupted") % self.indexfile
            )
        self.index, self._chunkcache = d
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = {}
        # revlog header -> revlog compressor
        self._decompressors = {}

    @util.propertycache
    def _compressor(self):
        engine = util.compengines[self._compengine]
        return engine.revlogcompressor(self._compengineopts)

    def _indexfp(self, mode=b'r'):
        """file object for the revlog's index file"""
        args = {'mode': mode}
        if mode != b'r':
            args['checkambig'] = self._checkambig
        if mode == b'w':
            args['atomictemp'] = True
        return self.opener(self.indexfile, **args)

    def _datafp(self, mode=b'r'):
        """file object for the revlog's data file"""
        return self.opener(self.datafile, mode=mode)

    @contextlib.contextmanager
    def _datareadfp(self, existingfp=None):
        """file object suitable to read data"""
        # Use explicit file handle, if given.
        if existingfp is not None:
            yield existingfp

        # Use a file handle being actively used for writes, if available.
        # There is some danger to doing this because reads will seek the
        # file. However, _writeentry() performs a SEEK_END before all writes,
        # so we should be safe.
        elif self._writinghandles:
            if self._inline:
                yield self._writinghandles[0]
            else:
                yield self._writinghandles[1]

        # Otherwise open a new file handle.
        else:
            if self._inline:
                func = self._indexfp
            else:
                func = self._datafp
            with func() as fp:
                yield fp

    def tiprev(self):
        return len(self.index) - 1

    def tip(self):
        return self.node(self.tiprev())

    def __contains__(self, rev):
        return 0 <= rev < len(self)

    def __len__(self):
        return len(self.index)

    def __iter__(self):
        return iter(pycompat.xrange(len(self)))

    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        return storageutil.iterrevs(len(self), start=start, stop=stop)

    @property
    def nodemap(self):
        msg = (
            b"revlog.nodemap is deprecated, "
            b"use revlog.index.[has_node|rev|get_rev]"
        )
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self.index.nodemap

    @property
    def _nodecache(self):
        msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self.index.nodemap

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def candelta(self, baserev, rev):
        """whether two revisions (baserev, rev) can be delta-ed or not"""
        # Disable delta if either rev requires a content-changing flag
        # processor (ex. LFS). This is because such a flag processor can
        # alter the rawtext content that the delta will be based on, and
        # two clients could have the same revlog node with different flags
        # (i.e. different rawtext contents) and the delta could be
        # incompatible.
        if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
            self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
        ):
            return False
        return True

    def clearcaches(self):
        self._revisioncache = None
        self._chainbasecache.clear()
        self._chunkcache = (0, b'')
        self._pcache = {}
        self.index.clearcaches()

    def rev(self, node):
        try:
            return self.index.rev(node)
        except TypeError:
            raise
        except error.RevlogError:
            # parsers.c radix tree lookup failed
            if node == wdirid or node in wdirfilenodeids:
                raise error.WdirUnsupported
            raise error.LookupError(node, self.indexfile, _(b'no node'))

    # Accessors for index entries.

    # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
    # are flags.
    def start(self, rev):
        return int(self.index[rev][0] >> 16)

    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF

    def length(self, rev):
        return self.index[rev][1]

    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.rawdata(rev)
        return len(t)

    def size(self, rev):
        """length of non-raw text (processed by a "read" flag processor)"""
        # fast path: if no "read" flag processor could change the content,
        # size is rawsize. note: ELLIPSIS is known to not change the content.
        flags = self.flags(rev)
        if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
            return self.rawsize(rev)

        return len(self.revision(rev, raw=False))

    def chainbase(self, rev):
        base = self._chainbasecache.get(rev)
        if base is not None:
            return base

        index = self.index
        iterrev = rev
        base = index[iterrev][3]
        while base != iterrev:
            iterrev = base
            base = index[iterrev][3]

        self._chainbasecache[rev] = base
        return base

    def linkrev(self, rev):
        return self.index[rev][4]

    def parentrevs(self, rev):
        try:
            entry = self.index[rev]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

        return entry[5], entry[6]

    # fast parentrevs(rev) where rev isn't filtered
    _uncheckedparentrevs = parentrevs

    def node(self, rev):
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

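    # --- illustrative sketch (editor's addition, not part of the module) ---
    # The accessors above are thin wrappers over fixed positions in the
    # 8-tuple index entry (illustrative values):
    #
    #     entry = (
    #         (1024 << 16) | 0,  # [0] offset << 16 | flags
    #         11,                # [1] compressed length
    #         23,                # [2] uncompressed length (-1 if unknown)
    #         0,                 # [3] delta base rev
    #         5,                 # [4] link rev
    #         4,                 # [5] parent 1 rev
    #         -1,                # [6] parent 2 rev
    #         b"\x11" * 20,      # [7] nodeid
    #     )
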
    # Derived from index values.

    def end(self, rev):
        return self.start(rev) + self.length(rev)

    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7]  # map revisions to nodes inline

    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r

    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped

    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse revision order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        # first, make sure start revisions aren't filtered
        revs = list(revs)
        checkrev = self.node
        for r in revs:
            checkrev(r)
        # and we're sure ancestors aren't filtered as well

        if rustancestor is not None:
            lazyancestors = rustancestor.LazyAncestors
            arg = self.index
        elif util.safehasattr(parsers, b'rustlazyancestors'):
            lazyancestors = ancestor.rustlazyancestors
            arg = self.index
        else:
            lazyancestors = ancestor.lazyancestors
            arg = self._uncheckedparentrevs
        return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)

    def descendants(self, revs):
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

        ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if r not in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]

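    # --- illustrative sketch (editor's addition, not part of the module) ---
    # The "take all ancestors from heads that aren't in has" loop above is
    # a plain BFS over parent revs. With a toy parent map (nullrev == -1)
    #
    #     parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1)}
    #
    # and has == {-1, 0, 1} (::common, inclusive), visiting heads [2, 3]
    # leaves missing == {2, 3}: both pop off the queue, and their parents
    # are already in has, so nothing else is enqueued.
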
    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        if rustancestor is not None:
            return rustancestor.MissingAncestors(self.index, common)
        return ancestor.incrementalmissingancestors(self.parentrevs, common)

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid]  # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n)  # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update(
                            [p for p in self.parents(n) if p != nullid]
                        )
                    elif n in heads:  # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents is a
                # descendant. (We seeded the descendants set with the roots
                # up there, remember?)
1163 if (p[0] in descendants) or (p[1] in descendants):
1172 if (p[0] in descendants) or (p[1] in descendants):
1164 descendants.add(n)
1173 descendants.add(n)
1165 isdescendant = True
1174 isdescendant = True
1166 if isdescendant and ((ancestors is None) or (n in ancestors)):
1175 if isdescendant and ((ancestors is None) or (n in ancestors)):
1167 # Only include nodes that are both descendants and ancestors.
1176 # Only include nodes that are both descendants and ancestors.
1168 orderedout.append(n)
1177 orderedout.append(n)
1169 if (ancestors is not None) and (n in heads):
1178 if (ancestors is not None) and (n in heads):
1170 # We're trying to figure out which heads are reachable
1179 # We're trying to figure out which heads are reachable
1171 # from roots.
1180 # from roots.
1172 # Mark this head as having been reached
1181 # Mark this head as having been reached
1173 heads[n] = True
1182 heads[n] = True
1174 elif ancestors is None:
1183 elif ancestors is None:
1175 # Otherwise, we're trying to discover the heads.
1184 # Otherwise, we're trying to discover the heads.
1176 # Assume this is a head because if it isn't, the next step
1185 # Assume this is a head because if it isn't, the next step
1177 # will eventually remove it.
1186 # will eventually remove it.
1178 heads[n] = True
1187 heads[n] = True
1179 # But, obviously its parents aren't.
1188 # But, obviously its parents aren't.
1180 for p in self.parents(n):
1189 for p in self.parents(n):
1181 heads.pop(p, None)
1190 heads.pop(p, None)
1182 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1191 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1183 roots = list(roots)
1192 roots = list(roots)
1184 assert orderedout
1193 assert orderedout
1185 assert roots
1194 assert roots
1186 assert heads
1195 assert heads
1187 return (orderedout, roots, heads)
1196 return (orderedout, roots, heads)
1188
1197
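    # Illustrative sketch (not from the Mercurial source): the classification
    # loop above leans on the revlog invariant that parents always have lower
    # revision numbers than their children, so a single ascending sweep can
    # compute a descendant set. Hypothetical standalone version:
    #
    #   >>> def descendantset(roots, parentrevs, nrevs):
    #   ...     seen = set(roots)
    #   ...     for r in range(min(roots) + 1, nrevs):
    #   ...         if any(p in seen for p in parentrevs(r)):
    #   ...             seen.add(r)
    #   ...     return seen
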
    def headrevs(self, revs=None):
        if revs is None:
            try:
                return self.index.headrevs()
            except AttributeError:
                return self._headrevs()
        if rustdagop is not None:
            return rustdagop.headrevs(self.index, revs)
        return dagop.headrevs(revs, self._uncheckedparentrevs)

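    # The dispatch above prefers the C index implementation, then the Rust
    # one when a subset is given, then the pure-Python fallback. Hedged usage
    # sketch (``repo`` and the revision numbers are hypothetical):
    #
    #   >>> cl = repo.changelog
    #   >>> cl.headrevs()           # all topological heads, e.g. [41, 57]
    #   >>> cl.headrevs([10, 11])   # heads within the given subset, e.g. [11]
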
    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iterate over filtered revs, so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1  # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0  # my parents are not
        return [r for r, val in enumerate(ishead) if val]

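    # A minimal sketch of the marking pass above on a hypothetical DAG with
    # parents 0 <- 1 <- 2 and 0 <- 3: every rev starts as a candidate head
    # and each rev clears its parents, leaving [2, 3]. (The extra slot in
    # ``ishead`` above absorbs writes for nullrev, index -1.)
    #
    #   >>> parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (0, -1)}
    #   >>> ishead = [1] * 4 + [0]
    #   >>> for r in range(4):
    #   ...     for p in parents[r]:
    #   ...         ishead[p] = 0
    #   >>> [r for r, v in enumerate(ishead) if v]
    #   [2, 3]
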
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullrev
        else:
            start = self.rev(start)

        stoprevs = set(self.rev(n) for n in stop or [])

        revs = dagop.headrevssubset(
            self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
        )

        return [self.node(rev) for rev in revs]

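    # Hedged usage sketch (``repo`` is hypothetical): restricting ``heads``
    # to a sub-DAG.
    #
    #   >>> fl = repo.file(b'a.txt')
    #   >>> fl.heads()                  # all heads of this filelog
    #   >>> fl.heads(start=fl.node(0))  # only heads descending from rev 0
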
    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        ancs = self._commonancestorsheads(a, b)
        return pycompat.maplist(self.node, ancs)

    def _commonancestorsheads(self, *revs):
        """calculate all the heads of the common ancestors of revs"""
        try:
            ancs = self.index.commonancestorsheads(*revs)
        except (AttributeError, OverflowError):  # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
        return ancs

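    # On a "criss-cross" history the common ancestors of two nodes can have
    # more than one head, which is why a list is returned. Sketch on a
    # hypothetical DAG where b0 and b1 each descend from both a0 and a1:
    #
    #   >>> cl.commonancestorsheads(b0, b1)   # -> [a0, a1] (order unspecified)
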
    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        A revision is considered an ancestor of itself."""
        a, b = self.rev(a), self.rev(b)
        return self.isancestorrev(a, b)

    def isancestorrev(self, a, b):
        """return True if revision a is an ancestor of revision b

        A revision is considered an ancestor of itself.

        The implementation of this is trivial but the use of
        reachableroots is not."""
        if a == nullrev:
            return True
        elif a == b:
            return True
        elif a > b:
            return False
        return bool(self.reachableroots(a, [b], [a], includepath=False))

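    # The ``a > b`` early return above exploits append-only numbering: an
    # ancestor can never carry a larger revision number than its descendant.
    # Illustrative checks (revision numbers hypothetical):
    #
    #   >>> cl.isancestorrev(2, 2)   # True: a rev is its own ancestor
    #   >>> cl.isancestorrev(7, 2)   # False without any DAG walk, since 7 > 2
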
    def reachableroots(self, minroot, heads, roots, includepath=False):
        """return (heads(::(<roots> and <roots>::<heads>)))

        If includepath is True, return (<roots>::<heads>)."""
        try:
            return self.index.reachableroots2(
                minroot, heads, roots, includepath
            )
        except AttributeError:
            return dagop._reachablerootspure(
                self.parentrevs, minroot, roots, heads, includepath
            )

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node)  # quick search the index
                return node
            except error.LookupError:
                pass  # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if b"%d" % rev != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, error.LookupError):
                pass

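    # ``_match`` resolves, in order: an integer rev, a 20-byte binary nodeid,
    # a decimal revision string (negative counts from the tip), and a 40-char
    # hex nodeid. Hedged sketch:
    #
    #   >>> cl._match(0)             # rev number -> binary node
    #   >>> cl._match(b'-1')         # b'-1' resolves to the tip
    #   >>> cl._match(b'ff' * 20)    # 40 hex digits -> full-hash lookup
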
    def _partialmatch(self, id):
        # we don't care about wdirfilenodeids as they should always be full
        # hashes
        maybewdir = wdirhex.startswith(id)
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise error.RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise error.AmbiguousPrefixLookupError(
                    id, self.indexfile, _(b'ambiguous identifier')
                )
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2  # grab an even number of digits
                prefix = bin(id[: l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [
                    n for n in nl if hex(n).startswith(id) and self.hasnode(n)
                ]
                if nullhex.startswith(id):
                    nl.append(nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.indexfile, _(b'ambiguous identifier')
                    )
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except TypeError:
                pass

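    # The working-directory pseudo-node is all ``f`` bytes, so any prefix made
    # solely of ``f`` characters is potentially ambiguous with it; that is the
    # case the ``maybewdir`` branches above guard. Hypothetical sketch:
    #
    #   >>> cl._partialmatch(b'ffff')   # may raise error.WdirUnsupported
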
    def lookup(self, id):
        """locate a node based on:
        - revision number or str(revision number)
        - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise error.LookupError(id, self.indexfile, _(b'no match found'))

    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""

        def isvalid(prefix):
            try:
                matchednode = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if matchednode is None:
                raise error.LookupError(node, self.indexfile, _(b'no node'))
            return True

        def maybewdir(prefix):
            return all(c == b'f' for c in pycompat.iterbytestr(prefix))

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != wdirid:
                    raise error.LookupError(node, self.indexfile, _(b'no node'))
            except AttributeError:
                # Fall through to pure code
                pass

        if node == wdirid:
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        for length in range(minlength, 41):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)

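    # Hedged usage sketch: ``minlength`` only sets a floor on the prefix
    # length; the returned prefix is still guaranteed unambiguous.
    #
    #   >>> cl.shortest(node)               # e.g. b'1f0dee'
    #   >>> cl.shortest(node, minlength=8)  # at least 8 hex digits
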
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return storageutil.hashrevisionsha1(text, p1, p2) != node

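    # ``cmp`` never reconstructs the stored text: because a nodeid is a SHA-1
    # over (min(p1, p2), max(p1, p2), text), hashing the candidate text with
    # the stored parents and comparing nodeids is sufficient. Sketch:
    #
    #   >>> fl.cmp(node, fl.revision(node))   # False: contents match
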
    def _cachesegment(self, offset, data):
        """Add a segment to the revlog cache.

        Accepts an absolute offset and the data that is at that location.
        """
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

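    # The cache grows by appending only when the new segment is exactly
    # contiguous with the cached one and the total stays under ``_chunksize``;
    # anything else replaces the cache wholesale. Toy trace (hypothetical):
    #
    #   cache == (0, b'abcd');  _cachesegment(4, b'ef')  ->  (0, b'abcdef')
    #   cache == (0, b'abcd');  _cachesegment(8, b'gh')  ->  (8, b'gh')
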
    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.

        Raises if the requested number of bytes could not be read.
        """
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (
            (offset + length + cachesize) & ~(cachesize - 1)
        ) - realoffset
        with self._datareadfp(df) as df:
            df.seek(realoffset)
            d = df.read(reallength)

        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            startoffset = offset - realoffset
            if len(d) - startoffset < length:
                raise error.RevlogError(
                    _(
                        b'partial read of revlog %s; expected %d bytes from '
                        b'offset %d, got %d'
                    )
                    % (
                        self.indexfile if self._inline else self.datafile,
                        length,
                        realoffset,
                        len(d) - startoffset,
                    )
                )

            return util.buffer(d, startoffset, length)

        if len(d) < length:
            raise error.RevlogError(
                _(
                    b'partial read of revlog %s; expected %d bytes from offset '
                    b'%d, got %d'
                )
                % (
                    self.indexfile if self._inline else self.datafile,
                    length,
                    offset,
                    len(d),
                )
            )

        return d

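    # ``realoffset``/``reallength`` above round the request outward to the
    # cache window, which must be a power of two for the mask arithmetic to
    # hold. Worked example with a hypothetical 64 KiB window:
    #
    #   offset = 100000, length = 1000, cachesize = 65536
    #   realoffset = 100000 & ~65535                     -> 65536
    #   reallength = ((100000 + 1000 + 65536) & ~65535)
    #                - realoffset                        -> 65536
    #   i.e. one aligned 64 KiB read that covers the 1000 requested bytes.
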
    def _getsegment(self, offset, length, df=None):
        """Obtain a segment of raw data from the revlog.

        Accepts an absolute offset, length of bytes to obtain, and an
        optional file handle to the already-opened revlog. If the file
        handle is used, its original seek position will not be preserved.

        Requests for data may be returned from a cache.

        Returns a str or a buffer instance of raw byte data.
        """
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d  # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._readsegment(offset, length, df=df)

    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start

        return start, self._getsegment(start, length, df=df)

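    # In an inline revlog each revision's data is interleaved with its index
    # entry in a single file, so a logical data offset must be shifted by
    # (rev + 1) index-entry sizes, as done above. Hypothetical numbers:
    #
    #   logical start 300 of rev 4, 64-byte entries:
    #   physical offset = 300 + (4 + 1) * 64 = 620
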
    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])

    def _chunks(self, revs, df=None, targetsize=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        if not self._withsparseread:
            slicedchunks = (revs,)
        else:
            slicedchunks = deltautil.slicechunk(
                self, revs, targetsize=targetsize
            )

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            for rev in revschunk:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

        return l

    def _chunkclear(self):
        """Clear the raw chunk cache."""
        self._chunkcache = (0, b'')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

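    # Under generaldelta the base stored in the index entry is authoritative;
    # in the legacy layout a delta always chains against the previous rev.
    # In both layouts a base equal to the rev itself marks a full snapshot:
    #
    #   index[rev][3] == rev  ->  full text stored, deltaparent is nullrev
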
    def issnapshot(self, rev):
        """tells whether rev is a snapshot"""
        if not self._sparserevlog:
            return self.deltaparent(rev) == nullrev
        elif util.safehasattr(self.index, b'issnapshot'):
            # directly assign the method to cache the testing and access
            self.issnapshot = self.index.issnapshot
            return self.issnapshot(rev)
        if rev == nullrev:
            return True
        entry = self.index[rev]
        base = entry[3]
        if base == rev:
            return True
        if base == nullrev:
            return True
        p1 = entry[5]
        p2 = entry[6]
        if base == p1 or base == p2:
            return False
        return self.issnapshot(base)

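    # With sparse-revlog, snapshots need not delta against null: intermediate
    # snapshots delta against another (smaller) snapshot that is not one of
    # their parents. That is why ``base == p1 or base == p2`` above proves a
    # rev is a regular delta, while any other base recurses down the chain.
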
    def snapshotdepth(self, rev):
        """number of snapshots in the chain before this one"""
        if not self.issnapshot(rev):
            raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
        return len(self._deltachain(rev)[0]) - 1

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._chunk(rev2))

        return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))

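    # Fast path: when rev2's stored delta is already computed against rev1,
    # the raw chunk can be returned as-is; otherwise a fresh binary diff of
    # the two full raw texts is produced. Hedged sketch:
    #
    #   >>> rl.revdiff(3, 4)   # binary delta turning rawdata(3) into rawdata(4)
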
    def _processflags(self, text, flags, operation, raw=False):
        """deprecated entry point to access flag processors"""
        msg = b'_processflag(...) use the specialized variant'
        util.nouideprecwarn(msg, b'5.2', stacklevel=2)
        if raw:
            return text, flagutil.processflagsraw(self, text, flags)
        elif operation == b'read':
            return flagutil.processflagsread(self, text, flags)
        else:  # write operation
            return flagutil.processflagswrite(self, text, flags)

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        raw - an optional argument specifying if the revision data is to be
        treated as raw data when applying flag transforms. 'raw' should be set
        to True when generating changegroups or in debug commands.
        """
        if raw:
            msg = (
                b'revlog.revision(..., raw=True) is deprecated, '
                b'use revlog.rawdata(...)'
            )
            util.nouideprecwarn(msg, b'5.2', stacklevel=2)
        return self._revisiondata(nodeorrev, _df, raw=raw)[0]

    def sidedata(self, nodeorrev, _df=None):
        """a map of extra data related to the changeset but not part of the hash

        This function currently returns a dictionary. However, a more advanced
        mapping object will likely be used in the future for more
        efficient/lazy code.
        """
        return self._revisiondata(nodeorrev, _df)[1]

    def _revisiondata(self, nodeorrev, _df=None, raw=False):
        # deal with <nodeorrev> argument type
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        # fast path the special `nullid` rev
        if node == nullid:
            return b"", {}

        # ``rawtext`` is the text as stored inside the revlog. Might be the
        # revision or might need to be processed to retrieve the revision.
        rev, rawtext, validated = self._rawtext(node, rev, _df=_df)

        if raw and validated:
            # if we don't want to process the raw text and the raw
            # text is cached, we can exit early.
            return rawtext, {}
        if rev is None:
            rev = self.rev(node)
        # the revlog's flags for this revision
        # (they usually alter its state or content)
        flags = self.flags(rev)

        if validated and flags == REVIDX_DEFAULT_FLAGS:
            # no extra flags set, no flag processor runs, text = rawtext
            return rawtext, {}

        sidedata = {}
        if raw:
            validatehash = flagutil.processflagsraw(self, rawtext, flags)
            text = rawtext
        else:
            try:
                r = flagutil.processflagsread(self, rawtext, flags)
            except error.SidedataHashError as exc:
                msg = _(b"integrity check failed on %s:%s sidedata key %d")
                msg %= (self.indexfile, pycompat.bytestr(rev), exc.sidedatakey)
                raise error.RevlogError(msg)
            text, validatehash, sidedata = r
        if validatehash:
            self.checkhash(text, node, rev=rev)
        if not validated:
            self._revisioncache = (node, rev, rawtext)

        return text, sidedata

    def _rawtext(self, node, rev, _df=None):
        """return the possibly unvalidated rawtext for a revision

        returns (rev, rawtext, validated)
        """

        # revision in the cache (could be useful to apply delta)
        cachedrev = None
        # An intermediate text to apply deltas to
        basetext = None

        # Check if we have the entry in cache
        # The cache entry looks like (node, rev, rawtext)
        if self._revisioncache:
            if self._revisioncache[0] == node:
                return (rev, self._revisioncache[2], True)
            cachedrev = self._revisioncache[1]

        if rev is None:
            rev = self.rev(node)

        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
        if stopped:
            basetext = self._revisioncache[2]

        # drop cache to save memory, the caller is expected to
        # update self._revisioncache after validating the text
        self._revisioncache = None

        targetsize = None
        rawsize = self.index[rev][2]
        if 0 <= rawsize:
            targetsize = 4 * rawsize

        bins = self._chunks(chain, df=_df, targetsize=targetsize)
        if basetext is None:
            basetext = bytes(bins[0])
            bins = bins[1:]

        rawtext = mdiff.patches(basetext, bins)
        del basetext  # let us have a chance to free memory early
        return (rev, rawtext, False)

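    # ``targetsize = 4 * rawsize`` above caps how much surrounding data a
    # sparse read may pull in while walking the delta chain: reading much
    # more than a few times the final text would defeat the point of
    # slicing. (Heuristic as implemented; the factor 4 is the code's choice.)
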
    def rawdata(self, nodeorrev, _df=None):
        """return the uncompressed raw data of a given node or revision number.

        _df - an existing file handle to read from. (internal-only)
        """
        return self._revisiondata(nodeorrev, _df, raw=True)[0]

    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return storageutil.hashrevisionsha1(text, p1, p2)

    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        try:
            if p1 is None and p2 is None:
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                # Clear the revision cache on hash failure. The revision cache
                # only stores the raw revision and clearing the cache does have
                # the side-effect that we won't have a cache hit when the raw
                # revision data is accessed. But this case should be rare and
                # it is extra work to teach the cache about the hash
                # verification state.
                if self._revisioncache and self._revisioncache[0] == node:
                    self._revisioncache = None

                revornode = rev
                if revornode is None:
                    revornode = templatefilters.short(hex(node))
                raise error.RevlogError(
                    _(b"integrity check failed on %s:%s")
                    % (self.indexfile, pycompat.bytestr(revornode))
                )
        except error.RevlogError:
            if self._censorable and storageutil.iscensoredtext(text):
                raise error.CensoredNodeError(self.indexfile, node, text)
            raise

    def _enforceinlinesize(self, tr, fp=None):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        if (
            not self._inline
            or (self.start(tiprev) + self.length(tiprev)) < _maxinline
        ):
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise error.RevlogError(
                _(b"%s not found in the transaction") % self.indexfile
            )

        trindex = trinfo[2]
        if trindex is not None:
            dataoff = self.start(trindex)
        else:
            # revlog was stripped at start of transaction, use all leftover data
            trindex = len(self) - 1
            dataoff = self.end(tiprev)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()
            # We can't use the cached file handle after close(). So prevent
            # its usage.
            self._writinghandles = None

        with self._indexfp(b'r') as ifh, self._datafp(b'w') as dfh:
            for r in self:
                dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])

        with self._indexfp(b'w') as fp:
            self.version &= ~FLAG_INLINE_DATA
            self._inline = False
            io = self._io
            for i in self:
                e = io.packentry(self.index[i], self.node, self.version, i)
                fp.write(e)

            # the temp file replaces the real index when we exit the context
            # manager

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

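    # Conversion sketch: every revision's data is first copied out of the
    # inline file into a standalone data file, then the index is rewritten
    # without FLAG_INLINE_DATA; the transaction's ``replace`` records the
    # index truncation point so the swap stays recoverable on rollback.
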
    def _nodeduplicatecallback(self, transaction, node):
        """called when trying to add a node already stored."""

    def addrevision(
        self,
        text,
        transaction,
        link,
        p1,
        p2,
        cachedelta=None,
        node=None,
        flags=REVIDX_DEFAULT_FLAGS,
        deltacomputer=None,
        sidedata=None,
    ):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
        computed by default as hash(text, p1, p2), however subclasses might
        use a different hashing method (and override checkhash() in that case)
        flags - the known flags to set on the revision
        deltacomputer - an optional deltacomputer instance shared between
        multiple calls
        """
        if link == nullrev:
            raise error.RevlogError(
                _(b"attempted to add linkrev -1 to %s") % self.indexfile
            )

        if sidedata is None:
            sidedata = {}
            flags = flags & ~REVIDX_SIDEDATA
        elif not self.hassidedata:
            raise error.ProgrammingError(
                _(b"trying to add sidedata to a revlog that doesn't support them")
            )
        else:
            flags |= REVIDX_SIDEDATA

        if flags:
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = flagutil.processflagswrite(
            self, text, flags, sidedata=sidedata
        )

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise error.RevlogError(
                _(
                    b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
                )
                % (self.indexfile, len(rawtext))
            )

        node = node or self.hash(rawtext, p1, p2)
        if self.index.has_node(node):
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(
            rawtext,
            transaction,
            link,
            p1,
            p2,
            node,
            flags,
            cachedelta=cachedelta,
            deltacomputer=deltacomputer,
        )

    def addrawrevision(
        self,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        deltacomputer=None,
    ):
        """add a raw revision with known flags, node and parents
        useful when reusing a revision not stored in this revlog (ex: received
        over wire, or read from an external bundle).
        """
        dfh = None
        if not self._inline:
            dfh = self._datafp(b"a+")
        ifh = self._indexfp(b"a+")
        try:
            return self._addrevision(
                node,
                rawtext,
                transaction,
                link,
                p1,
                p2,
                flags,
                cachedelta,
                ifh,
                dfh,
                deltacomputer=deltacomputer,
            )
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def compress(self, data):
        """Generate a possibly-compressed representation of data."""
        if not data:
            return b'', data

        compressed = self._compressor.compress(data)

        if compressed:
            # The revlog compressor added the header in the returned data.
            return b'', compressed

        if data[0:1] == b'\0':
            return b'', data
        return b'u', data

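    # Usage sketch (illustrative, not part of revlog.py): compress()
    # returns a (header, data) pair; b'u' marks text stored uncompressed,
    # while b'' means the data is empty or the engine header is already
    # embedded in the payload. `rl` is assumed to be an open revlog.
    #
    # >>> header, packed = rl.compress(b'x' * 1024)
    # >>> rl.decompress(header + packed) == b'x' * 1024
    # True
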
    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely be at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        if t == b'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise error.RevlogError(
                    _(b'revlog decompress error: %s')
                    % stringutil.forcebytestr(e)
                )
        # '\0' is more common than 'u' so it goes first.
        elif t == b'\0':
            return data
        elif t == b'u':
            return util.buffer(data, 1)

        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor(self._compengineopts)
                self._decompressors[t] = compressor
            except KeyError:
                raise error.RevlogError(_(b'unknown compression type %r') % t)

        return compressor.decompress(data)

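    # Dispatch summary for the header byte `t` above (illustrative recap
    # derived from the code, not part of revlog.py):
    #
    #   b'x'  -> zlib-compressed chunk
    #   b'\0' -> raw chunk stored verbatim
    #   b'u'  -> raw chunk prefixed by a one-byte marker that is stripped
    #   other -> engine looked up via util.compengines.forrevlogheader(t)
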
    def _addrevision(
        self,
        node,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        flags,
        cachedelta,
        ifh,
        dfh,
        alwayscache=False,
        deltacomputer=None,
    ):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.

        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

        if "deltacomputer" is not provided or None, a defaultdeltacomputer will
        be used.

        invariants:
        - rawtext is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        if node == nullid:
            raise error.RevlogError(
                _(b"%s: attempt to add null revision") % self.indexfile
            )
        if node == wdirid or node in wdirfilenodeids:
            raise error.RevlogError(
                _(b"%s: attempt to add wdir revision") % self.indexfile
            )

        if self._inline:
            fh = ifh
        else:
            fh = dfh

        btext = [rawtext]

        curr = len(self)
        prev = curr - 1
        offset = self.end(prev)
        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if rawtext is None:
            # need rawtext size, before changed by flag processors, which is
            # the non-raw size. use revlog explicitly to avoid filelog's extra
            # logic that might remove metadata size.
            textlen = mdiff.patchedsize(
                revlog.size(self, cachedelta[0]), cachedelta[1]
            )
        else:
            textlen = len(rawtext)

        if deltacomputer is None:
            deltacomputer = deltautil.deltacomputer(self)

        revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)

        deltainfo = deltacomputer.finddeltainfo(revinfo, fh)

        e = (
            offset_type(offset, flags),
            deltainfo.deltalen,
            textlen,
            deltainfo.base,
            link,
            p1r,
            p2r,
            node,
        )
        self.index.append(e)

        entry = self._io.packentry(e, self.node, self.version, curr)
        self._writeentry(
            transaction, ifh, dfh, entry, deltainfo.data, link, offset
        )

        rawtext = btext[0]

        if alwayscache and rawtext is None:
            rawtext = deltacomputer.buildtext(revinfo, fh)

        if type(rawtext) == bytes:  # only accept immutable objects
            self._revisioncache = (node, curr, rawtext)
        self._chainbasecache[curr] = deltainfo.chainbase
        return node

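    # Layout of the index entry tuple appended by _addrevision() above
    # (illustrative recap, not part of revlog.py):
    #
    #   (offset_type(offset, flags),  # packed file offset plus flag bits
    #    deltainfo.deltalen,          # on-disk size of the stored chunk
    #    textlen,                     # uncompressed revision size
    #    deltainfo.base,              # revision the stored delta is against
    #    link, p1r, p2r, node)        # linkrev, parent revs, node id
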
    def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        ifh.seek(0, os.SEEK_END)
        if dfh:
            dfh.seek(0, os.SEEK_END)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self._enforceinlinesize(transaction, ifh)

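    # The append pitfall worked around above, as a standalone sketch
    # (illustrative, not part of revlog.py): a handle used for both reads
    # and writes may sit at a read position, so reposition it to the end
    # before every append-style write.
    #
    # >>> fh = open(path, 'rb+')      # `path` is assumed to exist
    # >>> _ = fh.read(4)              # handle now positioned mid-file
    # >>> fh.seek(0, os.SEEK_END)     # reposition before appending
    # >>> _ = fh.write(b'new entry')
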
    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        """
        add a delta group

        Given a set of deltas, add them to the revision log. The first delta
        is against its parent, which should be in our log; the rest are
        against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """

        if self._writinghandles:
            raise error.ProgrammingError(b'cannot nest addgroup() calls')

        nodes = []

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self._indexfp(b"a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self._datafp(b"a+")

        def flush():
            if dfh:
                dfh.flush()
            ifh.flush()

        self._writinghandles = (ifh, dfh)

        try:
            deltacomputer = deltautil.deltacomputer(self)
            # loop through our set of deltas
            for data in deltas:
                node, p1, p2, linknode, deltabase, delta, flags = data
                link = linkmapper(linknode)
                flags = flags or REVIDX_DEFAULT_FLAGS

                nodes.append(node)

                if self.index.has_node(node):
                    self._nodeduplicatecallback(transaction, node)
                    # this can happen if two branches make the same change
                    continue

                for p in (p1, p2):
                    if not self.index.has_node(p):
                        raise error.LookupError(
                            p, self.indexfile, _(b'unknown parent')
                        )

                if not self.index.has_node(deltabase):
                    raise error.LookupError(
                        deltabase, self.indexfile, _(b'unknown delta base')
                    )

                baserev = self.rev(deltabase)

                if baserev != nullrev and self.iscensored(baserev):
                    # if base is censored, delta must be full replacement in a
                    # single patch operation
                    hlen = struct.calcsize(b">lll")
                    oldlen = self.rawsize(baserev)
                    newlen = len(delta) - hlen
                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                        raise error.CensoredBaseError(
                            self.indexfile, self.node(baserev)
                        )

                if not flags and self._peek_iscensored(baserev, delta, flush):
                    flags |= REVIDX_ISCENSORED

                # We assume consumers of addrevisioncb will want to retrieve
                # the added revision, which will require a call to
                # revision(). revision() will fast path if there is a cache
                # hit. So, we tell _addrevision() to always cache in this case.
                # We're only using addgroup() in the context of changegroup
                # generation so the revision data can always be handled as raw
                # by the flagprocessor.
                self._addrevision(
                    node,
                    None,
                    transaction,
                    link,
                    p1,
                    p2,
                    flags,
                    (baserev, delta),
                    ifh,
                    dfh,
                    alwayscache=bool(addrevisioncb),
                    deltacomputer=deltacomputer,
                )

                if addrevisioncb:
                    addrevisioncb(self, node)

                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self._datafp(b"a+")
                    ifh = self._indexfp(b"a+")
                    self._writinghandles = (ifh, dfh)
        finally:
            self._writinghandles = None

            if dfh:
                dfh.close()
            ifh.close()

        return nodes

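    # Shape of each item consumed from ``deltas`` above (illustrative recap,
    # not part of revlog.py):
    #
    #   (node, p1, p2, linknode, deltabase, delta, flags)
    #
    # A hypothetical group with a single full-snapshot entry against the
    # null revision could be fed in like this (`rl`, `tr`, `cl`, `node`,
    # `p1`, `p2`, `linknode` and `fulltext` are all assumed to exist):
    #
    # >>> delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
    # >>> rl.addgroup([(node, p1, p2, linknode, nullid, delta, 0)],
    # ...             cl.rev, tr)
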
    def iscensored(self, rev):
        """Check if a file revision is censored."""
        if not self._censorable:
            return False

        return self.flags(rev) & REVIDX_ISCENSORED

    def _peek_iscensored(self, baserev, delta, flush):
        """Quickly check if a delta produces a censored revision."""
        if not self._censorable:
            return False

        return storageutil.deltaiscensored(delta, baserev, self.rawsize)

    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        return storageutil.resolvestripinfo(
            minlink,
            len(self) - 1,
            self.headrevs(),
            self.linkrev,
            self.parentrevs,
        )

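    # Usage sketch (illustrative, not part of revlog.py); `rl`, `tr` and
    # `minlink` are assumed:
    #
    # >>> striprev, brokenrevs = rl.getstrippoint(minlink)
    # >>> rl.strip(minlink, tr)   # truncates from `striprev` onwards
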
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._revisioncache = None
        self._chaininfocache = {}
        self._chunkclear()

        del self.index[rev:-1]

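    # Truncation arithmetic used above (illustrative recap, not part of
    # revlog.py): with a separate .d file, the index is cut at
    # rev * entry-size and the data file at self.start(rev); an inline
    # revlog interleaves both in one file, so the cut point becomes
    # self.start(rev) + rev * entry-size.
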
    def checksize(self):
        """Check size of index and data files

        return a (dd, di) tuple.
        - dd: extra bytes for the "data" file
        - di: extra bytes for the "index" file

        A healthy revlog will return (0, 0).
        """
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, io.SEEK_END)
                actual = f.tell()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, io.SEEK_END)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

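    # Usage sketch (illustrative, not part of revlog.py); `rl` assumed:
    #
    # >>> dd, di = rl.checksize()
    # >>> (dd, di)
    # (0, 0)
    #
    # Any non-zero value indicates trailing garbage in the data (dd) or
    # index (di) file.
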
    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
    ):
        if nodesorder not in (b'nodes', b'storage', b'linear', None):
            raise error.ProgrammingError(
                b'unhandled value for nodesorder: %s' % nodesorder
            )

        if nodesorder is None and not self._generaldelta:
            nodesorder = b'storage'

        if (
            not self._storedeltachains
            and deltamode != repository.CG_DELTAMODE_PREV
        ):
            deltamode = repository.CG_DELTAMODE_FULL

        return storageutil.emitrevisions(
            self,
            nodes,
            nodesorder,
            revlogrevisiondelta,
            deltaparentfn=self.deltaparent,
            candeltafn=self.candelta,
            rawsizefn=self.rawsize,
            revdifffn=self.revdiff,
            flagsfn=self.flags,
            deltamode=deltamode,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
        )

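    # Usage sketch (illustrative, not part of revlog.py): emitting revision
    # deltas for a set of nodes, e.g. while building a changegroup; `rl`
    # and `wanted_nodes` assumed.
    #
    # >>> for rdelta in rl.emitrevisions(wanted_nodes,
    # ...                                nodesorder=b'storage',
    # ...                                revisiondata=True):
    # ...     handle(rdelta)   # `handle` is a hypothetical consumer
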
    DELTAREUSEALWAYS = b'always'
    DELTAREUSESAMEREVS = b'samerevs'
    DELTAREUSENEVER = b'never'

    DELTAREUSEFULLADD = b'fulladd'

    DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}

    def clone(
        self,
        tr,
        destrevlog,
        addrevisioncb=None,
        deltareuse=DELTAREUSESAMEREVS,
        forcedeltabothparents=None,
        sidedatacompanion=None,
    ):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
           Deltas will always be reused (if possible), even if the destination
           revlog would not select the same revisions for the delta. This is
           the fastest mode of operation.
        DELTAREUSESAMEREVS
           Deltas will be reused if the destination revlog would pick the same
           revisions for the delta. This mode strikes a balance between speed
           and optimization.
        DELTAREUSENEVER
           Deltas will never be reused. This is the slowest mode of execution.
           This mode can be used to recompute deltas (e.g. if the diff/delta
           algorithm changes).
        DELTAREUSEFULLADD
           Revisions will be re-added as if they were new content. This is
           slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
           e.g. large file detection and handling.

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``forcedeltabothparents``
        argument controls whether to force compute deltas against both parents
        for merges. By default, the current default is used.

        If not None, ``sidedatacompanion`` is a callable that accepts two
        arguments:

            (srcrevlog, rev)

        and returns a triplet that controls changes to sidedata content from
        the old revision to the new clone result:

            (dropall, filterout, update)

        * if ``dropall`` is True, all sidedata should be dropped
        * ``filterout`` is a set of sidedata keys that should be dropped
        * ``update`` is a mapping of additional/new key -> value
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(
                _(b'value for deltareuse invalid: %s') % deltareuse
            )

        if len(destrevlog):
            raise ValueError(_(b'destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_(b'source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_(b'destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase control whether to reuse a cached delta,
        # if possible.
        oldlazydelta = destrevlog._lazydelta
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            self._clone(
                tr,
                destrevlog,
                addrevisioncb,
                deltareuse,
                forcedeltabothparents,
                sidedatacompanion,
            )

        finally:
            destrevlog._lazydelta = oldlazydelta
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd

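    # A minimal ``sidedatacompanion`` sketch matching the contract described
    # in the clone() docstring (illustrative, not part of revlog.py; the
    # sidedata keys and surrounding names are hypothetical):
    #
    # >>> def companion(srcrevlog, rev):
    # ...     dropall = False
    # ...     filterout = {OLD_SIDEDATA_KEY}        # keys to drop
    # ...     update = {NEW_SIDEDATA_KEY: b'data'}  # keys to add/replace
    # ...     return dropall, filterout, update
    # >>> srcrevlog.clone(tr, destrevlog, sidedatacompanion=companion)
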
    def _clone(
        self,
        tr,
        destrevlog,
        addrevisioncb,
        deltareuse,
        forcedeltabothparents,
        sidedatacompanion,
    ):
        """perform the core duty of `revlog.clone` after parameter processing"""
        deltacomputer = deltautil.deltacomputer(destrevlog)
        index = self.index
        for rev in self:
            entry = index[rev]

            # Some classes override linkrev to take filtered revs into
            # account. Use raw entry from index.
            flags = entry[0] & 0xFFFF
            linkrev = entry[4]
            p1 = index[entry[5]][7]
            p2 = index[entry[6]][7]
            node = entry[7]

            sidedataactions = (False, [], {})
            if sidedatacompanion is not None:
                sidedataactions = sidedatacompanion(self, rev)

            # (Possibly) reuse the delta from the revlog if allowed and
            # the revlog chunk is a delta.
            cachedelta = None
            rawtext = None
            if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
                dropall, filterout, update = sidedataactions
                text, sidedata = self._revisiondata(rev)
                if dropall:
                    sidedata = {}
                for key in filterout:
                    sidedata.pop(key, None)
                sidedata.update(update)
                if not sidedata:
                    sidedata = None
                destrevlog.addrevision(
                    text,
                    tr,
                    linkrev,
                    p1,
                    p2,
                    cachedelta=cachedelta,
                    node=node,
                    flags=flags,
                    deltacomputer=deltacomputer,
                    sidedata=sidedata,
                )
            else:
                if destrevlog._lazydelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                if not cachedelta:
                    rawtext = self.rawdata(rev)

                ifh = destrevlog.opener(
                    destrevlog.indexfile, b'a+', checkambig=False
                )
                dfh = None
                if not destrevlog._inline:
                    dfh = destrevlog.opener(destrevlog.datafile, b'a+')
                try:
                    destrevlog._addrevision(
                        node,
                        rawtext,
                        tr,
                        linkrev,
                        p1,
                        p2,
                        flags,
                        cachedelta,
                        ifh,
                        dfh,
                        deltacomputer=deltacomputer,
                    )
                finally:
                    if dfh:
                        dfh.close()
                    ifh.close()

            if addrevisioncb:
                addrevisioncb(self, rev, node)

    def censorrevision(self, tr, censornode, tombstone=b''):
        if (self.version & 0xFFFF) == REVLOGV0:
            raise error.RevlogError(
                _(b'cannot censor with version %d revlogs') % self.version
            )

        censorrev = self.rev(censornode)
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        if len(tombstone) > self.rawsize(censorrev):
            raise error.Abort(
                _(b'censor tombstone must be no longer than censored data')
            )

        # Rewriting the revlog in place is hard. Our strategy for censoring is
        # to create a new revlog, copy all revisions to it, then replace the
        # revlogs on transaction close.

        newindexfile = self.indexfile + b'.tmpcensored'
        newdatafile = self.datafile + b'.tmpcensored'

        # This is a bit dangerous. We could easily have a mismatch of state.
        newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True)
        newrl.version = self.version
        newrl._generaldelta = self._generaldelta
        newrl._io = self._io

        for rev in self.revs():
            node = self.node(rev)
            p1, p2 = self.parents(node)

            if rev == censorrev:
                newrl.addrawrevision(
                    tombstone,
                    tr,
                    self.linkrev(censorrev),
                    p1,
                    p2,
                    censornode,
                    REVIDX_ISCENSORED,
                )

                if newrl.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'censored revision stored as delta; '
                            b'cannot censor'
                        ),
                        hint=_(
                            b'censoring of revlogs is not '
                            b'fully implemented; please report '
                            b'this bug'
                        ),
                    )
                continue

            if self.iscensored(rev):
                if self.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'cannot censor due to censored '
                            b'revision having delta stored'
                        )
                    )
                rawtext = self._chunk(rev)
            else:
                rawtext = self.rawdata(rev)

            newrl.addrawrevision(
                rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
            )

        tr.addbackup(self.indexfile, location=b'store')
        if not self._inline:
            tr.addbackup(self.datafile, location=b'store')

        self.opener.rename(newrl.indexfile, self.indexfile)
        if not self._inline:
            self.opener.rename(newrl.datafile, self.datafile)

        self.clearcaches()
        self._loadindex()

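    # Usage sketch (illustrative, not part of revlog.py): censoring a file
    # revision inside an open transaction; `fl` (a censorable filelog-style
    # revlog), `tr` and `badnode` are assumed.
    #
    # >>> fl.censorrevision(tr, badnode, tombstone=b'removed by admin')
    # >>> bool(fl.iscensored(fl.rev(badnode)))
    # True
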
    def verifyintegrity(self, state):
        """Verifies the integrity of the revlog.

        Yields ``revlogproblem`` instances describing problems that are
        found.
        """
        dd, di = self.checksize()
        if dd:
            yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
        if di:
            yield revlogproblem(error=_(b'index contains %d extra bytes') % di)

        version = self.version & 0xFFFF

        # The verifier tells us what version revlog we should be.
        if version != state[b'expectedversion']:
            yield revlogproblem(
                warning=_(b"warning: '%s' uses revlog format %d; expected %d")
                % (self.indexfile, version, state[b'expectedversion'])
            )

        state[b'skipread'] = set()

        for rev in self:
            node = self.node(rev)

            # Verify contents. 4 cases to care about:
            #
            #   common: the most common case
            #   rename: with a rename
            #   meta:   file content starts with b'\1\n', the metadata
            #           header defined in filelog.py, but without a rename
            #   ext:    content stored externally
            #
            # More formally, their differences are shown below:
            #
            #                       | common | rename | meta  | ext
            # -------------------------------------------------------
            # flags()               | 0      | 0      | 0     | not 0
            # renamed()             | False  | True   | False | ?
            # rawtext[0:2]=='\1\n'  | False  | True   | True  | ?
            #
            # "rawtext" means the raw text stored in revlog data, which
            # could be retrieved by "rawdata(rev)". "text"
            # mentioned below is "revision(rev)".
            #
            # There are 3 different lengths stored physically:
            #  1. L1: rawsize, stored in revlog index
            #  2. L2: len(rawtext), stored in revlog data
            #  3. L3: len(text), stored in revlog data if flags==0, or
            #     possibly somewhere else if flags!=0
            #
            # L1 should be equal to L2. L3 could be different from them.
            # "text" may or may not affect commit hash depending on flag
            # processors (see flagutil.addflagprocessor).
            #
            #              | common | rename | meta  | ext
            # -------------------------------------------------
            # rawsize()    | L1     | L1     | L1    | L1
            # size()       | L1     | L2-LM  | L1(*) | L1 (?)
            # len(rawtext) | L2     | L2     | L2    | L2
            # len(text)    | L2     | L2     | L2    | L3
            # len(read())  | L2     | L2-LM  | L2-LM | L3 (?)
            #
            # LM:  length of metadata, depending on rawtext
            # (*): not ideal, see comment in filelog.size
            # (?): could be "- len(meta)" if the resolved content has
            #      rename metadata
            #
            # Checks that need to be done:
            #  1. length check: L1 == L2, in all cases.
            #  2. hash check: depending on flag processor, we may need to
            #     use either "text" (external), or "rawtext" (in revlog).

            try:
                skipflags = state.get(b'skipflags', 0)
                if skipflags:
                    skipflags &= self.flags(rev)

                _verify_revision(self, skipflags, state, node)

                l1 = self.rawsize(rev)
                l2 = len(self.rawdata(node))

                if l1 != l2:
                    yield revlogproblem(
                        error=_(b'unpacked size is %d, %d expected') % (l2, l1),
                        node=node,
                    )

            except error.CensoredNodeError:
                if state[b'erroroncensored']:
                    yield revlogproblem(
                        error=_(b'censored file data'), node=node
                    )
                state[b'skipread'].add(node)
            except Exception as e:
                yield revlogproblem(
                    error=_(b'unpacking %s: %s')
                    % (short(node), stringutil.forcebytestr(e)),
                    node=node,
                )
                state[b'skipread'].add(node)

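    # Usage sketch (illustrative, not part of revlog.py): driving
    # verifyintegrity() the way the verifier does; `rl` is assumed.
    #
    # >>> state = {b'expectedversion': rl.version & 0xFFFF,
    # ...          b'erroroncensored': True}
    # >>> for problem in rl.verifyintegrity(state):
    # ...     report(problem.error or problem.warning)  # `report` hypothetical
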
    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        d = {}

        if exclusivefiles:
            d[b'exclusivefiles'] = [(self.opener, self.indexfile)]
            if not self._inline:
                d[b'exclusivefiles'].append((self.opener, self.datafile))

        if sharedfiles:
            d[b'sharedfiles'] = []

        if revisionscount:
            d[b'revisionscount'] = len(self)

        if trackedsize:
            d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))

        if storedsize:
            d[b'storedsize'] = sum(
                self.opener.stat(path).st_size for path in self.files()
            )

        return d
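
    # Usage sketch (illustrative, not part of revlog.py); `rl` assumed:
    #
    # >>> info = rl.storageinfo(revisionscount=True, storedsize=True)
    # >>> info[b'revisionscount'] == len(rl)
    # True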