nodemap: add a (python) index class for persistent nodemap testing...
marmoute - r44794:6f9e8e14 default
@@ -1,1558 +1,1561 @@
# configitems.py - centralized declaration of configuration options
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools
import re

from . import (
    encoding,
    error,
)


def loadconfigtable(ui, extname, configtable):
    """update config items known to the ui with the extension ones"""
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            msg = b"extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config=b'warn-config')

        knownitems.update(items)

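# A minimal usage sketch (an assumption for illustration, not part of this
# module): an extension typically builds its ``configtable`` through
# ``mercurial.registrar``, and ``loadconfigtable`` above merges it into
# ``ui._knownconfig`` when the extension loads. The extension name and
# config key below are hypothetical.
#
#     from mercurial import registrar
#
#     configtable = {}
#     configitem = registrar.configitem(configtable)
#     configitem(b'myext', b'some-knob', default=False)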
class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, the name is matched as a
        regular expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        self._re = None
        if generic:
            self._re = re.compile(self.name)

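# A hedged sketch (assumption, for illustration only): constructing a
# generic item directly. With ``generic=True`` the ``name`` argument is
# compiled into ``_re`` and later matched by ``itemregister.get`` below.
#
#     item = configitem(b'color', b'.*', default=None, generic=True)
#     assert item._re.match(b'color.mode')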
class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        baseitem = super(itemregister, self).get(key)
        if baseitem is not None and not baseitem.generic:
            return baseitem

        # search for a matching generic item
        generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
        for item in generics:
            # We use 'match' instead of 'search' to make the matching simpler
            # for people unfamiliar with regular expressions. Having the match
            # rooted to the start of the string produces less surprising
            # results for users writing simple regexes for sub-attributes.
            #
            # For example, using "color\..*" with match produces an
            # unsurprising result, while search could suddenly match
            # apparently unrelated configuration that happens to contain
            # "color." anywhere. This is a tradeoff where we favor requiring
            # ".*" on some patterns over prefixing most patterns with "^".
            # The "^" seems more error prone.
            if item._re.match(key):
                return item

        return None

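# A hedged sketch (assumption) of why ``match`` rather than ``search``
# matters here: anchoring at the start of the key means b'ui.color' does
# not hit a generic b'color\..*' item even though it contains "color." in
# the middle. The registered pattern below is hypothetical.
#
#     reg = itemregister()
#     reg[b'color\\..*'] = configitem(b'color', b'color\\..*', generic=True)
#     assert reg.get(b'color.mode') is not None
#     assert reg.get(b'ui.color') is None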
coreitems = {}


def _register(configtable, *args, **kwargs):
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = b"duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item


# special value for case where the default is derived from other values
dynamicdefault = object()

# Registering actual config items


def getitemregister(configtable):
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f


coreconfigitem = getitemregister(coreitems)

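# A hedged sketch (assumption): ``dynamicdefault`` is a sentinel object,
# not a value, so consumers test identity instead of reading a concrete
# default. The section and key below are hypothetical.
#
#     coreconfigitem(b'mysection', b'myknob', default=dynamicdefault)
#     item = coreitems[b'mysection'].get(b'myknob')
#     if item.default is dynamicdefault:
#         pass  # compute the real default from other settings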
def _registerdiffopts(section, configprefix=b''):
    coreconfigitem(
        section, configprefix + b'nodates', default=False,
    )
    coreconfigitem(
        section, configprefix + b'showfunc', default=False,
    )
    coreconfigitem(
        section, configprefix + b'unified', default=None,
    )
    coreconfigitem(
        section, configprefix + b'git', default=False,
    )
    coreconfigitem(
        section, configprefix + b'ignorews', default=False,
    )
    coreconfigitem(
        section, configprefix + b'ignorewsamount', default=False,
    )
    coreconfigitem(
        section, configprefix + b'ignoreblanklines', default=False,
    )
    coreconfigitem(
        section, configprefix + b'ignorewseol', default=False,
    )
    coreconfigitem(
        section, configprefix + b'nobinary', default=False,
    )
    coreconfigitem(
        section, configprefix + b'noprefix', default=False,
    )
    coreconfigitem(
        section, configprefix + b'word-diff', default=False,
    )

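# A hedged sketch (assumption) of what the helper above expands to:
#
#     _registerdiffopts(section=b'annotate')
#     # registers annotate.nodates, annotate.showfunc, annotate.unified, ...
#
#     _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
#     # registers commands.commit.interactive.nodates, and so on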
coreconfigitem(
    b'alias', b'.*', default=dynamicdefault, generic=True,
)
coreconfigitem(
    b'auth', b'cookiefile', default=None,
)
_registerdiffopts(section=b'annotate')
# bookmarks.pushing: internal hack for discovery
coreconfigitem(
    b'bookmarks', b'pushing', default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem(
    b'bundle', b'mainreporoot', default=b'',
)
coreconfigitem(
    b'censor', b'policy', default=b'abort', experimental=True,
)
coreconfigitem(
    b'chgserver', b'idletimeout', default=3600,
)
coreconfigitem(
    b'chgserver', b'skiphash', default=False,
)
coreconfigitem(
    b'cmdserver', b'log', default=None,
)
coreconfigitem(
    b'cmdserver', b'max-log-files', default=7,
)
coreconfigitem(
    b'cmdserver', b'max-log-size', default=b'1 MB',
)
coreconfigitem(
    b'cmdserver', b'max-repo-cache', default=0, experimental=True,
)
coreconfigitem(
    b'cmdserver', b'message-encodings', default=list, experimental=True,
)
coreconfigitem(
    b'cmdserver',
    b'track-log',
    default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
)
coreconfigitem(
    b'color', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'color', b'mode', default=b'auto',
)
coreconfigitem(
    b'color', b'pagermode', default=dynamicdefault,
)
_registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
coreconfigitem(
    b'commands', b'commit.post-status', default=False,
)
coreconfigitem(
    b'commands', b'grep.all-files', default=False, experimental=True,
)
coreconfigitem(
    b'commands', b'merge.require-rev', default=False,
)
coreconfigitem(
    b'commands', b'push.require-revs', default=False,
)
coreconfigitem(
    b'commands', b'resolve.confirm', default=False,
)
coreconfigitem(
    b'commands', b'resolve.explicit-re-merge', default=False,
)
coreconfigitem(
    b'commands', b'resolve.mark-check', default=b'none',
)
_registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
coreconfigitem(
    b'commands', b'show.aliasprefix', default=list,
)
coreconfigitem(
    b'commands', b'status.relative', default=False,
)
coreconfigitem(
    b'commands', b'status.skipstates', default=[], experimental=True,
)
coreconfigitem(
    b'commands', b'status.terse', default=b'',
)
coreconfigitem(
    b'commands', b'status.verbose', default=False,
)
coreconfigitem(
    b'commands', b'update.check', default=None,
)
coreconfigitem(
    b'commands', b'update.requiredest', default=False,
)
coreconfigitem(
    b'committemplate', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'convert', b'bzr.saverev', default=True,
)
coreconfigitem(
    b'convert', b'cvsps.cache', default=True,
)
coreconfigitem(
    b'convert', b'cvsps.fuzz', default=60,
)
coreconfigitem(
    b'convert', b'cvsps.logencoding', default=None,
)
coreconfigitem(
    b'convert', b'cvsps.mergefrom', default=None,
)
coreconfigitem(
    b'convert', b'cvsps.mergeto', default=None,
)
coreconfigitem(
    b'convert', b'git.committeractions', default=lambda: [b'messagedifferent'],
)
coreconfigitem(
    b'convert', b'git.extrakeys', default=list,
)
coreconfigitem(
    b'convert', b'git.findcopiesharder', default=False,
)
coreconfigitem(
    b'convert', b'git.remoteprefix', default=b'remote',
)
coreconfigitem(
    b'convert', b'git.renamelimit', default=400,
)
coreconfigitem(
    b'convert', b'git.saverev', default=True,
)
coreconfigitem(
    b'convert', b'git.similarity', default=50,
)
coreconfigitem(
    b'convert', b'git.skipsubmodules', default=False,
)
coreconfigitem(
    b'convert', b'hg.clonebranches', default=False,
)
coreconfigitem(
    b'convert', b'hg.ignoreerrors', default=False,
)
coreconfigitem(
    b'convert', b'hg.preserve-hash', default=False,
)
coreconfigitem(
    b'convert', b'hg.revs', default=None,
)
coreconfigitem(
    b'convert', b'hg.saverev', default=False,
)
coreconfigitem(
    b'convert', b'hg.sourcename', default=None,
)
coreconfigitem(
    b'convert', b'hg.startrev', default=None,
)
coreconfigitem(
    b'convert', b'hg.tagsbranch', default=b'default',
)
coreconfigitem(
    b'convert', b'hg.usebranchnames', default=True,
)
coreconfigitem(
    b'convert', b'ignoreancestorcheck', default=False, experimental=True,
)
coreconfigitem(
    b'convert', b'localtimezone', default=False,
)
coreconfigitem(
    b'convert', b'p4.encoding', default=dynamicdefault,
)
coreconfigitem(
    b'convert', b'p4.startrev', default=0,
)
coreconfigitem(
    b'convert', b'skiptags', default=False,
)
coreconfigitem(
    b'convert', b'svn.debugsvnlog', default=True,
)
coreconfigitem(
    b'convert', b'svn.trunk', default=None,
)
coreconfigitem(
    b'convert', b'svn.tags', default=None,
)
coreconfigitem(
    b'convert', b'svn.branches', default=None,
)
coreconfigitem(
    b'convert', b'svn.startrev', default=0,
)
coreconfigitem(
    b'debug', b'dirstate.delaywrite', default=0,
)
coreconfigitem(
    b'defaults', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'devel', b'all-warnings', default=False,
)
coreconfigitem(
    b'devel', b'bundle2.debug', default=False,
)
coreconfigitem(
    b'devel', b'bundle.delta', default=b'',
)
coreconfigitem(
    b'devel', b'cache-vfs', default=None,
)
coreconfigitem(
    b'devel', b'check-locks', default=False,
)
coreconfigitem(
    b'devel', b'check-relroot', default=False,
)
coreconfigitem(
    b'devel', b'default-date', default=None,
)
coreconfigitem(
    b'devel', b'deprec-warn', default=False,
)
coreconfigitem(
    b'devel', b'disableloaddefaultcerts', default=False,
)
coreconfigitem(
    b'devel', b'warn-empty-changegroup', default=False,
)
coreconfigitem(
    b'devel', b'legacy.exchange', default=list,
)
coreconfigitem(
+    b'devel', b'persistent-nodemap', default=False,
+)
+coreconfigitem(
    b'devel', b'servercafile', default=b'',
)
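# A hedged sketch (assumption): the b'devel', b'persistent-nodemap' knob
# added above would be enabled from an hgrc to exercise the
# persistent-nodemap code path, e.g. in a test setup:
#
#     [devel]
#     persistent-nodemap = yes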
coreconfigitem(
    b'devel', b'serverexactprotocol', default=b'',
)
coreconfigitem(
    b'devel', b'serverrequirecert', default=False,
)
coreconfigitem(
    b'devel', b'strip-obsmarkers', default=True,
)
coreconfigitem(
    b'devel', b'warn-config', default=None,
)
coreconfigitem(
    b'devel', b'warn-config-default', default=None,
)
coreconfigitem(
    b'devel', b'user.obsmarker', default=None,
)
coreconfigitem(
    b'devel', b'warn-config-unknown', default=None,
)
coreconfigitem(
    b'devel', b'debug.copies', default=False,
)
coreconfigitem(
    b'devel', b'debug.extensions', default=False,
)
coreconfigitem(
    b'devel', b'debug.repo-filters', default=False,
)
coreconfigitem(
    b'devel', b'debug.peer-request', default=False,
)
coreconfigitem(
    b'devel', b'discovery.randomize', default=True,
)
_registerdiffopts(section=b'diff')
coreconfigitem(
    b'email', b'bcc', default=None,
)
coreconfigitem(
    b'email', b'cc', default=None,
)
coreconfigitem(
    b'email', b'charsets', default=list,
)
coreconfigitem(
    b'email', b'from', default=None,
)
coreconfigitem(
    b'email', b'method', default=b'smtp',
)
coreconfigitem(
    b'email', b'reply-to', default=None,
)
coreconfigitem(
    b'email', b'to', default=None,
)
coreconfigitem(
    b'experimental', b'archivemetatemplate', default=dynamicdefault,
)
coreconfigitem(
    b'experimental', b'auto-publish', default=b'publish',
)
coreconfigitem(
    b'experimental', b'bundle-phases', default=False,
)
coreconfigitem(
    b'experimental', b'bundle2-advertise', default=True,
)
coreconfigitem(
    b'experimental', b'bundle2-output-capture', default=False,
)
coreconfigitem(
    b'experimental', b'bundle2.pushback', default=False,
)
coreconfigitem(
    b'experimental', b'bundle2lazylocking', default=False,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.bzip2', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.gzip', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.none', default=None,
)
coreconfigitem(
    b'experimental', b'bundlecomplevel.zstd', default=None,
)
coreconfigitem(
    b'experimental', b'changegroup3', default=False,
)
coreconfigitem(
    b'experimental', b'cleanup-as-archived', default=False,
)
coreconfigitem(
    b'experimental', b'clientcompressionengines', default=list,
)
coreconfigitem(
    b'experimental', b'copytrace', default=b'on',
)
coreconfigitem(
    b'experimental', b'copytrace.movecandidateslimit', default=100,
)
coreconfigitem(
    b'experimental', b'copytrace.sourcecommitlimit', default=100,
)
coreconfigitem(
    b'experimental', b'copies.read-from', default=b"filelog-only",
)
coreconfigitem(
    b'experimental', b'copies.write-to', default=b'filelog-only',
)
coreconfigitem(
    b'experimental', b'crecordtest', default=None,
)
coreconfigitem(
    b'experimental', b'directaccess', default=False,
)
coreconfigitem(
    b'experimental', b'directaccess.revnums', default=False,
)
coreconfigitem(
    b'experimental', b'editortmpinhg', default=False,
)
coreconfigitem(
    b'experimental', b'evolution', default=list,
)
coreconfigitem(
    b'experimental',
    b'evolution.allowdivergence',
    default=False,
    alias=[(b'experimental', b'allowdivergence')],
)
coreconfigitem(
    b'experimental', b'evolution.allowunstable', default=None,
)
coreconfigitem(
    b'experimental', b'evolution.createmarkers', default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.effect-flags',
    default=True,
    alias=[(b'experimental', b'effect-flags')],
)
coreconfigitem(
    b'experimental', b'evolution.exchange', default=None,
)
coreconfigitem(
    b'experimental', b'evolution.bundle-obsmarker', default=False,
)
coreconfigitem(
    b'experimental', b'log.topo', default=False,
)
coreconfigitem(
    b'experimental', b'evolution.report-instabilities', default=True,
)
coreconfigitem(
    b'experimental', b'evolution.track-operation', default=True,
)
# repo-level config to exclude a revset from visibility
#
# The target use case is to use `share` to expose different subsets of the
# same repository, especially server side. See also `server.view`.
coreconfigitem(
    b'experimental', b'extra-filter-revs', default=None,
)
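# A hedged sketch (assumption): such a filter would be set per repository,
# e.g. in a share's hgrc, with a hypothetical revset:
#
#     [experimental]
#     extra-filter-revs = not public()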
coreconfigitem(
    b'experimental', b'maxdeltachainspan', default=-1,
)
coreconfigitem(
    b'experimental', b'mergetempdirprefix', default=None,
)
coreconfigitem(
    b'experimental', b'mmapindexthreshold', default=None,
)
coreconfigitem(
    b'experimental', b'narrow', default=False,
)
coreconfigitem(
    b'experimental', b'nonnormalparanoidcheck', default=False,
)
coreconfigitem(
    b'experimental', b'exportableenviron', default=list,
)
coreconfigitem(
    b'experimental', b'extendedheader.index', default=None,
)
coreconfigitem(
    b'experimental', b'extendedheader.similarity', default=False,
)
coreconfigitem(
    b'experimental', b'graphshorten', default=False,
)
coreconfigitem(
    b'experimental', b'graphstyle.parent', default=dynamicdefault,
)
coreconfigitem(
    b'experimental', b'graphstyle.missing', default=dynamicdefault,
)
coreconfigitem(
    b'experimental', b'graphstyle.grandparent', default=dynamicdefault,
)
coreconfigitem(
    b'experimental', b'hook-track-tags', default=False,
)
coreconfigitem(
    b'experimental', b'httppeer.advertise-v2', default=False,
)
coreconfigitem(
    b'experimental', b'httppeer.v2-encoder-order', default=None,
)
coreconfigitem(
    b'experimental', b'httppostargs', default=False,
)
coreconfigitem(
    b'experimental', b'mergedriver', default=None,
)
coreconfigitem(b'experimental', b'nointerrupt', default=False)
coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)

coreconfigitem(
    b'experimental', b'obsmarkers-exchange-debug', default=False,
)
coreconfigitem(
    b'experimental', b'remotenames', default=False,
)
coreconfigitem(
    b'experimental', b'removeemptydirs', default=True,
)
coreconfigitem(
    b'experimental', b'revert.interactive.select-to-keep', default=False,
)
coreconfigitem(
    b'experimental', b'revisions.prefixhexnode', default=False,
)
coreconfigitem(
    b'experimental', b'revlogv2', default=None,
)
coreconfigitem(
    b'experimental', b'revisions.disambiguatewithin', default=None,
)
coreconfigitem(
    b'experimental', b'rust.index', default=False,
)
coreconfigitem(
    b'experimental', b'exp-persistent-nodemap', default=False,
)
coreconfigitem(
    b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
)
coreconfigitem(
    b'experimental',
    b'server.manifestdata.recommended-batch-size',
    default=100000,
)
coreconfigitem(
    b'experimental', b'server.stream-narrow-clones', default=False,
)
coreconfigitem(
    b'experimental', b'single-head-per-branch', default=False,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch:account-closed-heads',
    default=False,
)
coreconfigitem(
    b'experimental', b'sshserver.support-v2', default=False,
)
coreconfigitem(
    b'experimental', b'sparse-read', default=False,
)
coreconfigitem(
    b'experimental', b'sparse-read.density-threshold', default=0.50,
)
coreconfigitem(
    b'experimental', b'sparse-read.min-gap-size', default=b'65K',
)
coreconfigitem(
    b'experimental', b'treemanifest', default=False,
)
coreconfigitem(
    b'experimental', b'update.atomic-file', default=False,
)
coreconfigitem(
    b'experimental', b'sshpeer.advertise-v2', default=False,
)
coreconfigitem(
    b'experimental', b'web.apiserver', default=False,
)
coreconfigitem(
    b'experimental', b'web.api.http-v2', default=False,
)
coreconfigitem(
    b'experimental', b'web.api.debugreflect', default=False,
)
coreconfigitem(
    b'experimental', b'worker.wdir-get-thread-safe', default=False,
)
coreconfigitem(
    b'experimental', b'worker.repository-upgrade', default=False,
)
coreconfigitem(
    b'experimental', b'xdiff', default=False,
)
coreconfigitem(
    b'extensions', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'extdata', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'format', b'bookmarks-in-store', default=False,
)
coreconfigitem(
    b'format', b'chunkcachesize', default=None, experimental=True,
)
coreconfigitem(
    b'format', b'dotencode', default=True,
)
coreconfigitem(
    b'format', b'generaldelta', default=False, experimental=True,
)
coreconfigitem(
    b'format', b'manifestcachesize', default=None, experimental=True,
)
coreconfigitem(
    b'format', b'maxchainlen', default=dynamicdefault, experimental=True,
)
coreconfigitem(
    b'format', b'obsstore-version', default=None,
)
coreconfigitem(
    b'format', b'sparse-revlog', default=True,
)
coreconfigitem(
    b'format',
    b'revlog-compression',
    default=b'zlib',
    alias=[(b'experimental', b'format.compression')],
)
coreconfigitem(
    b'format', b'usefncache', default=True,
)
coreconfigitem(
    b'format', b'usegeneraldelta', default=True,
)
coreconfigitem(
    b'format', b'usestore', default=True,
)
coreconfigitem(
    b'format',
    b'exp-use-copies-side-data-changeset',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format', b'exp-use-side-data', default=False, experimental=True,
)
coreconfigitem(
    b'format', b'internal-phase', default=False, experimental=True,
)
coreconfigitem(
    b'fsmonitor', b'warn_when_unused', default=True,
)
coreconfigitem(
    b'fsmonitor', b'warn_update_file_count', default=50000,
)
coreconfigitem(
    b'help', br'hidden-command\..*', default=False, generic=True,
)
coreconfigitem(
    b'help', br'hidden-topic\..*', default=False, generic=True,
)
coreconfigitem(
    b'hooks', b'.*', default=dynamicdefault, generic=True,
)
coreconfigitem(
    b'hgweb-paths', b'.*', default=list, generic=True,
)
coreconfigitem(
    b'hostfingerprints', b'.*', default=list, generic=True,
)
coreconfigitem(
    b'hostsecurity', b'ciphers', default=None,
)
coreconfigitem(
    b'hostsecurity', b'disabletls10warning', default=False,
)
coreconfigitem(
    b'hostsecurity', b'minimumprotocol', default=dynamicdefault,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:minimumprotocol$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hostsecurity', b'.*:ciphers$', default=dynamicdefault, generic=True,
)
coreconfigitem(
    b'hostsecurity', b'.*:fingerprints$', default=list, generic=True,
)
coreconfigitem(
    b'hostsecurity', b'.*:verifycertsfile$', default=None, generic=True,
)

coreconfigitem(
    b'http_proxy', b'always', default=False,
)
coreconfigitem(
    b'http_proxy', b'host', default=None,
)
coreconfigitem(
    b'http_proxy', b'no', default=list,
)
coreconfigitem(
    b'http_proxy', b'passwd', default=None,
)
coreconfigitem(
    b'http_proxy', b'user', default=None,
)

coreconfigitem(
    b'http', b'timeout', default=None,
)

coreconfigitem(
    b'logtoprocess', b'commandexception', default=None,
)
coreconfigitem(
    b'logtoprocess', b'commandfinish', default=None,
)
coreconfigitem(
    b'logtoprocess', b'command', default=None,
)
coreconfigitem(
    b'logtoprocess', b'develwarn', default=None,
)
coreconfigitem(
    b'logtoprocess', b'uiblocked', default=None,
)
coreconfigitem(
    b'merge', b'checkunknown', default=b'abort',
)
coreconfigitem(
    b'merge', b'checkignored', default=b'abort',
)
coreconfigitem(
    b'experimental', b'merge.checkpathconflicts', default=False,
)
coreconfigitem(
    b'merge', b'followcopies', default=True,
)
coreconfigitem(
    b'merge', b'on-failure', default=b'continue',
)
coreconfigitem(
    b'merge', b'preferancestor', default=lambda: [b'*'], experimental=True,
)
coreconfigitem(
    b'merge', b'strict-capability-check', default=False,
)
coreconfigitem(
    b'merge-tools', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.args$',
    default=b"$local $base $other",
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.binary$', default=False, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.check$', default=list, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.checkchanged$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.executable$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.fixeol$', default=False, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.gui$', default=False, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkers$',
    default=b'basic',
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkertemplate$',
    default=dynamicdefault,  # take from ui.mergemarkertemplate
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.priority$', default=0, generic=True, priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.premerge$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools', br'.*\.symlink$', default=False, generic=True, priority=-1,
)
945 coreconfigitem(
948 coreconfigitem(
946 b'pager', b'attend-.*', default=dynamicdefault, generic=True,
949 b'pager', b'attend-.*', default=dynamicdefault, generic=True,
947 )
950 )
948 coreconfigitem(
951 coreconfigitem(
949 b'pager', b'ignore', default=list,
952 b'pager', b'ignore', default=list,
950 )
953 )
951 coreconfigitem(
954 coreconfigitem(
952 b'pager', b'pager', default=dynamicdefault,
955 b'pager', b'pager', default=dynamicdefault,
953 )
956 )
954 coreconfigitem(
957 coreconfigitem(
955 b'patch', b'eol', default=b'strict',
958 b'patch', b'eol', default=b'strict',
956 )
959 )
957 coreconfigitem(
960 coreconfigitem(
958 b'patch', b'fuzz', default=2,
961 b'patch', b'fuzz', default=2,
959 )
962 )
960 coreconfigitem(
963 coreconfigitem(
961 b'paths', b'default', default=None,
964 b'paths', b'default', default=None,
962 )
965 )
963 coreconfigitem(
966 coreconfigitem(
964 b'paths', b'default-push', default=None,
967 b'paths', b'default-push', default=None,
965 )
968 )
966 coreconfigitem(
969 coreconfigitem(
967 b'paths', b'.*', default=None, generic=True,
970 b'paths', b'.*', default=None, generic=True,
968 )
971 )
969 coreconfigitem(
972 coreconfigitem(
970 b'phases', b'checksubrepos', default=b'follow',
973 b'phases', b'checksubrepos', default=b'follow',
971 )
974 )
972 coreconfigitem(
975 coreconfigitem(
973 b'phases', b'new-commit', default=b'draft',
976 b'phases', b'new-commit', default=b'draft',
974 )
977 )
975 coreconfigitem(
978 coreconfigitem(
976 b'phases', b'publish', default=True,
979 b'phases', b'publish', default=True,
977 )
980 )
978 coreconfigitem(
981 coreconfigitem(
979 b'profiling', b'enabled', default=False,
982 b'profiling', b'enabled', default=False,
980 )
983 )
981 coreconfigitem(
984 coreconfigitem(
982 b'profiling', b'format', default=b'text',
985 b'profiling', b'format', default=b'text',
983 )
986 )
984 coreconfigitem(
987 coreconfigitem(
985 b'profiling', b'freq', default=1000,
988 b'profiling', b'freq', default=1000,
986 )
989 )
987 coreconfigitem(
990 coreconfigitem(
988 b'profiling', b'limit', default=30,
991 b'profiling', b'limit', default=30,
989 )
992 )
990 coreconfigitem(
993 coreconfigitem(
991 b'profiling', b'nested', default=0,
994 b'profiling', b'nested', default=0,
992 )
995 )
993 coreconfigitem(
996 coreconfigitem(
994 b'profiling', b'output', default=None,
997 b'profiling', b'output', default=None,
995 )
998 )
996 coreconfigitem(
999 coreconfigitem(
997 b'profiling', b'showmax', default=0.999,
1000 b'profiling', b'showmax', default=0.999,
998 )
1001 )
999 coreconfigitem(
1002 coreconfigitem(
1000 b'profiling', b'showmin', default=dynamicdefault,
1003 b'profiling', b'showmin', default=dynamicdefault,
1001 )
1004 )
1002 coreconfigitem(
1005 coreconfigitem(
1003 b'profiling', b'showtime', default=True,
1006 b'profiling', b'showtime', default=True,
1004 )
1007 )
1005 coreconfigitem(
1008 coreconfigitem(
1006 b'profiling', b'sort', default=b'inlinetime',
1009 b'profiling', b'sort', default=b'inlinetime',
1007 )
1010 )
1008 coreconfigitem(
1011 coreconfigitem(
1009 b'profiling', b'statformat', default=b'hotpath',
1012 b'profiling', b'statformat', default=b'hotpath',
1010 )
1013 )
1011 coreconfigitem(
1014 coreconfigitem(
1012 b'profiling', b'time-track', default=dynamicdefault,
1015 b'profiling', b'time-track', default=dynamicdefault,
1013 )
1016 )
1014 coreconfigitem(
1017 coreconfigitem(
1015 b'profiling', b'type', default=b'stat',
1018 b'profiling', b'type', default=b'stat',
1016 )
1019 )
1017 coreconfigitem(
1020 coreconfigitem(
1018 b'progress', b'assume-tty', default=False,
1021 b'progress', b'assume-tty', default=False,
1019 )
1022 )
1020 coreconfigitem(
1023 coreconfigitem(
1021 b'progress', b'changedelay', default=1,
1024 b'progress', b'changedelay', default=1,
1022 )
1025 )
1023 coreconfigitem(
1026 coreconfigitem(
1024 b'progress', b'clear-complete', default=True,
1027 b'progress', b'clear-complete', default=True,
1025 )
1028 )
1026 coreconfigitem(
1029 coreconfigitem(
1027 b'progress', b'debug', default=False,
1030 b'progress', b'debug', default=False,
1028 )
1031 )
1029 coreconfigitem(
1032 coreconfigitem(
1030 b'progress', b'delay', default=3,
1033 b'progress', b'delay', default=3,
1031 )
1034 )
1032 coreconfigitem(
1035 coreconfigitem(
1033 b'progress', b'disable', default=False,
1036 b'progress', b'disable', default=False,
1034 )
1037 )
1035 coreconfigitem(
1038 coreconfigitem(
1036 b'progress', b'estimateinterval', default=60.0,
1039 b'progress', b'estimateinterval', default=60.0,
1037 )
1040 )
1038 coreconfigitem(
1041 coreconfigitem(
1039 b'progress',
1042 b'progress',
1040 b'format',
1043 b'format',
1041 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1044 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1042 )
1045 )
1043 coreconfigitem(
1046 coreconfigitem(
1044 b'progress', b'refresh', default=0.1,
1047 b'progress', b'refresh', default=0.1,
1045 )
1048 )
1046 coreconfigitem(
1049 coreconfigitem(
1047 b'progress', b'width', default=dynamicdefault,
1050 b'progress', b'width', default=dynamicdefault,
1048 )
1051 )
coreconfigitem(
    b'push', b'pushvars.server', default=False,
)
coreconfigitem(
    b'rewrite',
    b'backup-bundle',
    default=True,
    alias=[(b'ui', b'history-editing-backup')],
)
coreconfigitem(
    b'rewrite', b'update-timestamp', default=False,
)
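
# Illustrative sketch (not part of the original module): because
# ``rewrite.backup-bundle`` above declares ``(b'ui', b'history-editing-backup')``
# as an alias, a value set under the legacy name should still be honoured
# when the new name is unset. The helper name is invented.
def _example_alias_fallback(ui):
    # Consults rewrite.backup-bundle first, then the aliased legacy
    # ui.history-editing-backup entry, then the declared default (True).
    return ui.configbool(b'rewrite', b'backup-bundle')
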
coreconfigitem(
    b'storage', b'new-repo-backend', default=b'revlogv1', experimental=True,
)
coreconfigitem(
    b'storage',
    b'revlog.optimize-delta-parent-choice',
    default=True,
    alias=[(b'format', b'aggressivemergedeltas')],
)
coreconfigitem(
    b'storage', b'revlog.reuse-external-delta', default=True,
)
coreconfigitem(
    b'storage', b'revlog.reuse-external-delta-parent', default=None,
)
coreconfigitem(
    b'storage', b'revlog.zlib.level', default=None,
)
coreconfigitem(
    b'storage', b'revlog.zstd.level', default=None,
)
coreconfigitem(
    b'server', b'bookmarks-pushkey-compat', default=True,
)
coreconfigitem(
    b'server', b'bundle1', default=True,
)
coreconfigitem(
    b'server', b'bundle1gd', default=None,
)
coreconfigitem(
    b'server', b'bundle1.pull', default=None,
)
coreconfigitem(
    b'server', b'bundle1gd.pull', default=None,
)
coreconfigitem(
    b'server', b'bundle1.push', default=None,
)
coreconfigitem(
    b'server', b'bundle1gd.push', default=None,
)
coreconfigitem(
    b'server',
    b'bundle2.stream',
    default=True,
    alias=[(b'experimental', b'bundle2.stream')],
)
coreconfigitem(
    b'server', b'compressionengines', default=list,
)
coreconfigitem(
    b'server', b'concurrent-push-mode', default=b'strict',
)
coreconfigitem(
    b'server', b'disablefullbundle', default=False,
)
coreconfigitem(
    b'server', b'maxhttpheaderlen', default=1024,
)
coreconfigitem(
    b'server', b'pullbundle', default=False,
)
coreconfigitem(
    b'server', b'preferuncompressed', default=False,
)
coreconfigitem(
    b'server', b'streamunbundle', default=False,
)
coreconfigitem(
    b'server', b'uncompressed', default=True,
)
coreconfigitem(
    b'server', b'uncompressedallowsecret', default=False,
)
coreconfigitem(
    b'server', b'view', default=b'served',
)
coreconfigitem(
    b'server', b'validate', default=False,
)
coreconfigitem(
    b'server', b'zliblevel', default=-1,
)
coreconfigitem(
    b'server', b'zstdlevel', default=3,
)
coreconfigitem(
    b'share', b'pool', default=None,
)
coreconfigitem(
    b'share', b'poolnaming', default=b'identity',
)
coreconfigitem(
    b'shelve', b'maxbackups', default=10,
)
coreconfigitem(
    b'smtp', b'host', default=None,
)
coreconfigitem(
    b'smtp', b'local_hostname', default=None,
)
coreconfigitem(
    b'smtp', b'password', default=None,
)
coreconfigitem(
    b'smtp', b'port', default=dynamicdefault,
)
coreconfigitem(
    b'smtp', b'tls', default=b'none',
)
coreconfigitem(
    b'smtp', b'username', default=None,
)
coreconfigitem(
    b'sparse', b'missingwarning', default=True, experimental=True,
)
coreconfigitem(
    b'subrepos',
    b'allowed',
    default=dynamicdefault,  # to make backporting simpler
)
coreconfigitem(
    b'subrepos', b'hg:allowed', default=dynamicdefault,
)
coreconfigitem(
    b'subrepos', b'git:allowed', default=dynamicdefault,
)
coreconfigitem(
    b'subrepos', b'svn:allowed', default=dynamicdefault,
)
coreconfigitem(
    b'templates', b'.*', default=None, generic=True,
)
coreconfigitem(
    b'templateconfig', b'.*', default=dynamicdefault, generic=True,
)
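
# Illustrative sketch (not part of the original module): items declared with
# ``generic=True`` are matched by regular expression, so any key under the
# section picks up the declared default. The key b'changeset' below is a
# hypothetical user-defined template name, and the helper name is invented.
def _example_generic_lookup(ui):
    # Matches the generic b'.*' declaration for the templates section,
    # returning None unless the user configured templates.changeset.
    return ui.config(b'templates', b'changeset')
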
coreconfigitem(
    b'trusted', b'groups', default=list,
)
coreconfigitem(
    b'trusted', b'users', default=list,
)
coreconfigitem(
    b'ui', b'_usedassubrepo', default=False,
)
coreconfigitem(
    b'ui', b'allowemptycommit', default=False,
)
coreconfigitem(
    b'ui', b'archivemeta', default=True,
)
coreconfigitem(
    b'ui', b'askusername', default=False,
)
coreconfigitem(
    b'ui', b'clonebundlefallback', default=False,
)
coreconfigitem(
    b'ui', b'clonebundleprefers', default=list,
)
coreconfigitem(
    b'ui', b'clonebundles', default=True,
)
coreconfigitem(
    b'ui', b'color', default=b'auto',
)
coreconfigitem(
    b'ui', b'commitsubrepos', default=False,
)
coreconfigitem(
    b'ui', b'debug', default=False,
)
coreconfigitem(
    b'ui', b'debugger', default=None,
)
coreconfigitem(
    b'ui', b'editor', default=dynamicdefault,
)
coreconfigitem(
    b'ui', b'fallbackencoding', default=None,
)
coreconfigitem(
    b'ui', b'forcecwd', default=None,
)
coreconfigitem(
    b'ui', b'forcemerge', default=None,
)
coreconfigitem(
    b'ui', b'formatdebug', default=False,
)
coreconfigitem(
    b'ui', b'formatjson', default=False,
)
coreconfigitem(
    b'ui', b'formatted', default=None,
)
coreconfigitem(
    b'ui', b'graphnodetemplate', default=None,
)
coreconfigitem(
    b'ui', b'interactive', default=None,
)
coreconfigitem(
    b'ui', b'interface', default=None,
)
coreconfigitem(
    b'ui', b'interface.chunkselector', default=None,
)
coreconfigitem(
    b'ui', b'large-file-limit', default=10000000,
)
coreconfigitem(
    b'ui', b'logblockedtimes', default=False,
)
coreconfigitem(
    b'ui', b'logtemplate', default=None,
)
coreconfigitem(
    b'ui', b'merge', default=None,
)
coreconfigitem(
    b'ui', b'mergemarkers', default=b'basic',
)
coreconfigitem(
    b'ui',
    b'mergemarkertemplate',
    default=(
        b'{node|short} '
        b'{ifeq(tags, "tip", "", '
        b'ifeq(tags, "", "", "{tags} "))}'
        b'{if(bookmarks, "{bookmarks} ")}'
        b'{ifeq(branch, "default", "", "{branch} ")}'
        b'- {author|user}: {desc|firstline}'
    ),
)
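
# Illustrative sketch (not part of the original module): for a hypothetical
# changeset by user ``alice`` on branch ``stable`` carrying bookmark
# ``feature``, the default template above might label a conflict marker as:
#
#     3a2b1c0d9e8f feature stable - alice: fix boundary check
#
# (node, bookmark, branch, author, and summary are all invented here.)
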
coreconfigitem(
    b'ui', b'message-output', default=b'stdio',
)
coreconfigitem(
    b'ui', b'nontty', default=False,
)
coreconfigitem(
    b'ui', b'origbackuppath', default=None,
)
coreconfigitem(
    b'ui', b'paginate', default=True,
)
coreconfigitem(
    b'ui', b'patch', default=None,
)
coreconfigitem(
    b'ui', b'pre-merge-tool-output-template', default=None,
)
coreconfigitem(
    b'ui', b'portablefilenames', default=b'warn',
)
coreconfigitem(
    b'ui', b'promptecho', default=False,
)
coreconfigitem(
    b'ui', b'quiet', default=False,
)
coreconfigitem(
    b'ui', b'quietbookmarkmove', default=False,
)
coreconfigitem(
    b'ui', b'relative-paths', default=b'legacy',
)
coreconfigitem(
    b'ui', b'remotecmd', default=b'hg',
)
coreconfigitem(
    b'ui', b'report_untrusted', default=True,
)
coreconfigitem(
    b'ui', b'rollback', default=True,
)
coreconfigitem(
    b'ui', b'signal-safe-lock', default=True,
)
coreconfigitem(
    b'ui', b'slash', default=False,
)
coreconfigitem(
    b'ui', b'ssh', default=b'ssh',
)
coreconfigitem(
    b'ui', b'ssherrorhint', default=None,
)
coreconfigitem(
    b'ui', b'statuscopies', default=False,
)
coreconfigitem(
    b'ui', b'strict', default=False,
)
coreconfigitem(
    b'ui', b'style', default=b'',
)
coreconfigitem(
    b'ui', b'supportcontact', default=None,
)
coreconfigitem(
    b'ui', b'textwidth', default=78,
)
coreconfigitem(
    b'ui', b'timeout', default=b'600',
)
coreconfigitem(
    b'ui', b'timeout.warn', default=0,
)
coreconfigitem(
    b'ui', b'traceback', default=False,
)
coreconfigitem(
    b'ui', b'tweakdefaults', default=False,
)
coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
coreconfigitem(
    b'ui', b'verbose', default=False,
)
coreconfigitem(
    b'verify', b'skipflags', default=None,
)
coreconfigitem(
    b'web', b'allowbz2', default=False,
)
coreconfigitem(
    b'web', b'allowgz', default=False,
)
coreconfigitem(
    b'web', b'allow-pull', alias=[(b'web', b'allowpull')], default=True,
)
coreconfigitem(
    b'web', b'allow-push', alias=[(b'web', b'allow_push')], default=list,
)
coreconfigitem(
    b'web', b'allowzip', default=False,
)
coreconfigitem(
    b'web', b'archivesubrepos', default=False,
)
coreconfigitem(
    b'web', b'cache', default=True,
)
coreconfigitem(
    b'web', b'comparisoncontext', default=5,
)
coreconfigitem(
    b'web', b'contact', default=None,
)
coreconfigitem(
    b'web', b'deny_push', default=list,
)
coreconfigitem(
    b'web', b'guessmime', default=False,
)
coreconfigitem(
    b'web', b'hidden', default=False,
)
coreconfigitem(
    b'web', b'labels', default=list,
)
coreconfigitem(
    b'web', b'logoimg', default=b'hglogo.png',
)
coreconfigitem(
    b'web', b'logourl', default=b'https://mercurial-scm.org/',
)
coreconfigitem(
    b'web', b'accesslog', default=b'-',
)
coreconfigitem(
    b'web', b'address', default=b'',
)
coreconfigitem(
    b'web', b'allow-archive', alias=[(b'web', b'allow_archive')], default=list,
)
coreconfigitem(
    b'web', b'allow_read', default=list,
)
coreconfigitem(
    b'web', b'baseurl', default=None,
)
coreconfigitem(
    b'web', b'cacerts', default=None,
)
coreconfigitem(
    b'web', b'certificate', default=None,
)
coreconfigitem(
    b'web', b'collapse', default=False,
)
coreconfigitem(
    b'web', b'csp', default=None,
)
coreconfigitem(
    b'web', b'deny_read', default=list,
)
coreconfigitem(
    b'web', b'descend', default=True,
)
coreconfigitem(
    b'web', b'description', default=b"",
)
coreconfigitem(
    b'web', b'encoding', default=lambda: encoding.encoding,
)
coreconfigitem(
    b'web', b'errorlog', default=b'-',
)
coreconfigitem(
    b'web', b'ipv6', default=False,
)
coreconfigitem(
    b'web', b'maxchanges', default=10,
)
coreconfigitem(
    b'web', b'maxfiles', default=10,
)
coreconfigitem(
    b'web', b'maxshortchanges', default=60,
)
coreconfigitem(
    b'web', b'motd', default=b'',
)
coreconfigitem(
    b'web', b'name', default=dynamicdefault,
)
coreconfigitem(
    b'web', b'port', default=8000,
)
coreconfigitem(
    b'web', b'prefix', default=b'',
)
coreconfigitem(
    b'web', b'push_ssl', default=True,
)
coreconfigitem(
    b'web', b'refreshinterval', default=20,
)
coreconfigitem(
    b'web', b'server-header', default=None,
)
coreconfigitem(
    b'web', b'static', default=None,
)
coreconfigitem(
    b'web', b'staticurl', default=None,
)
coreconfigitem(
    b'web', b'stripes', default=1,
)
coreconfigitem(
    b'web', b'style', default=b'paper',
)
coreconfigitem(
    b'web', b'templates', default=None,
)
coreconfigitem(
    b'web', b'view', default=b'served', experimental=True,
)
coreconfigitem(
    b'worker', b'backgroundclose', default=dynamicdefault,
)
# Windows defaults to a limit of 512 open files. A buffer of 128
# should give us enough headway.
coreconfigitem(
    b'worker', b'backgroundclosemaxqueue', default=384,
)
coreconfigitem(
    b'worker', b'backgroundcloseminfilecount', default=2048,
)
coreconfigitem(
    b'worker', b'backgroundclosethreadcount', default=4,
)
coreconfigitem(
    b'worker', b'enabled', default=True,
)
coreconfigitem(
    b'worker', b'numcpus', default=None,
)
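
# Illustrative sketch (not part of the original module): consumers of these
# knobs typically check worker.enabled before parallelising and treat a None
# worker.numcpus as "autodetect". The helper name is invented.
def _example_worker_settings(ui):
    if not ui.configbool(b'worker', b'enabled'):
        return 1  # parallelism disabled entirely
    numcpus = ui.configint(b'worker', b'numcpus')
    return numcpus  # None means "let Mercurial autodetect the CPU count"
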

# Rebase related configuration moved to core because other extensions are
# doing strange things. For example, shelve imports the extension to reuse
# some bits without formally loading it.
coreconfigitem(
    b'commands', b'rebase.requiredest', default=False,
)
coreconfigitem(
    b'experimental', b'rebaseskipobsolete', default=True,
)
coreconfigitem(
    b'rebase', b'singletransaction', default=False,
)
coreconfigitem(
    b'rebase', b'experimental.inmemory', default=False,
)
@@ -1,3789 +1,3791 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

from .revlogutils import constants as revlogconst

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

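# Illustrative sketch (not part of the original module): once the cache
# classes below are used as decorators, this set accumulates entries such as
# (b'dirstate', b'plain') for .hg/dirstate or (b'00changelog.i', b'') for a
# store-relative file; both example entries are merely plausible values.
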
class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)

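# Illustrative sketch (not part of the original module): these classes are
# used as property decorators on the repository type, mirroring (but not
# quoting) real usage later in this module:
#
#     @repofilecache(b'dirstate')
#     def dirstate(self):
#         ...  # re-read whenever .hg/dirstate changes on disk
#
#     @storecache(b'00changelog.i')
#     def changelog(self):
#         ...  # rebuilt whenever the store's 00changelog.i changes
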
def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

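# Illustrative sketch (not part of the original module): callers use this to
# peek at a filecache-ed property without forcing it to be computed. The
# property name below is an example:
#
#     changelog, cached = isfilecached(repo, b'changelog')
#     if cached:
#         ...  # reuse the already-loaded changelog without touching disk
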
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper

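# Illustrative sketch (not part of the original module): the decorator is
# meant for repository methods that must always see the full history, e.g.:
#
#     @unfilteredmethod
#     def destroyed(self):
#         ...  # cache invalidation must run against the unfiltered repo
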
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

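# Illustrative sketch (not part of the original module): the executor is
# normally obtained from a peer's commandexecutor() (defined below for the
# local peer) and driven through the context-manager protocol. The command
# name and the helper name are examples only.
def _example_executor_usage(peer):
    with peer.commandexecutor() as e:
        f = e.callcommand(b'heads', {})
        e.sendcommands()  # a no-op here, but required by the executor API
    return f.result()  # the future is already resolved for a local peer
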
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(b'clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, missingheads=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, missingheads=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = b'sparserevlog'

# A repository with the sidedataflag requirement will allow storing extra
# information for revisions without altering their original hashes.
SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'

# A repository with the copies-sidedata-changeset requirement will store
# copies related information in changesets' sidedata.
COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

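# Illustrative sketch (not part of the original module): an extension opts
# into this hook by registering a callable at load time. The extension's
# feature name below is invented.
#
#     def featuresetup(ui, supported):
#         supported |= {b'exp-myextension-feature'}
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
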
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # The .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

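    # Illustrative sketch (not part of the original function): for a typical
    # modern repository the parsed set might resemble the following; exact
    # contents vary with the Mercurial version and repository options.
    #
    #     {b'dotencode', b'fncache', b'generaldelta', b'revlogv1',
    #      b'sparserevlog', b'store'}
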
    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we will
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

555 # At this point, we know we should be capable of opening the repository.
555 # At this point, we know we should be capable of opening the repository.
556 # Now get on with doing that.
556 # Now get on with doing that.
557
557
558 features = set()
558 features = set()
559
559
560 # The "store" part of the repository holds versioned data. How it is
560 # The "store" part of the repository holds versioned data. How it is
561 # accessed is determined by various requirements. The ``shared`` or
561 # accessed is determined by various requirements. The ``shared`` or
562 # ``relshared`` requirements indicate the store lives in the path contained
562 # ``relshared`` requirements indicate the store lives in the path contained
563 # in the ``.hg/sharedpath`` file. This is an absolute path for
563 # in the ``.hg/sharedpath`` file. This is an absolute path for
564 # ``shared`` and relative to ``.hg/`` for ``relshared``.
564 # ``shared`` and relative to ``.hg/`` for ``relshared``.
565 if b'shared' in requirements or b'relshared' in requirements:
565 if b'shared' in requirements or b'relshared' in requirements:
566 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
566 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
567 if b'relshared' in requirements:
567 if b'relshared' in requirements:
568 sharedpath = hgvfs.join(sharedpath)
568 sharedpath = hgvfs.join(sharedpath)
569
569
570 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
570 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
571
571
572 if not sharedvfs.exists():
572 if not sharedvfs.exists():
573 raise error.RepoError(
573 raise error.RepoError(
574 _(b'.hg/sharedpath points to nonexistent directory %s')
574 _(b'.hg/sharedpath points to nonexistent directory %s')
575 % sharedvfs.base
575 % sharedvfs.base
576 )
576 )
577
577
578 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
578 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
579
579
580 storebasepath = sharedvfs.base
580 storebasepath = sharedvfs.base
581 cachepath = sharedvfs.join(b'cache')
581 cachepath = sharedvfs.join(b'cache')
582 else:
582 else:
583 storebasepath = hgvfs.base
583 storebasepath = hgvfs.base
584 cachepath = hgvfs.join(b'cache')
584 cachepath = hgvfs.join(b'cache')
585 wcachepath = hgvfs.join(b'wcache')
585 wcachepath = hgvfs.join(b'wcache')
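
    # For example (illustrative paths): with the ``shared`` requirement,
    # ``.hg/sharedpath`` holds an absolute path such as
    # b'/srv/src-repo/.hg', which is used as-is; with ``relshared`` it holds
    # a relative path such as b'../src-repo/.hg', which the ``hgvfs.join()``
    # above anchors at this repository's own .hg/ directory.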

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )
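

# Illustrative sketch (not part of Mercurial's API): the same ``type()``
# composition trick as above, in miniature. A dynamic type name may contain
# characters that would be illegal as a Python identifier, which lets
# diagnostic detail ride along in ``repr(type(repo))``. All names here are
# invented for the example.
def _exampletypecomposition():
    class mainpart(object):
        def hello(self):
            return b'main'

    class storagepart(object):
        def storage(self):
            return b'revlog'

    name = 'derivedrepo:/a/path<revlogv1,store>'
    cls = type(name, (mainpart, storagepart), {})
    repo = cls()
    assert repo.hello() == b'main'
    assert repo.storage() == b'revlog'
    return cls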


def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    if not rcutil.use_repo_hgrc():
        return False
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False
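

# Illustrative sketch of the monkeypatching the docstring above invites. The
# wrapper name and the extra ``hgrc-extra`` file are hypothetical; the
# wrapping helper is the stock ``extensions.wrapfunction``.
def _loadhgrcextra(orig, ui, wdirvfs, hgvfs, requirements):
    """Example wrapper that also reads a hypothetical extra config file."""
    loaded = orig(ui, wdirvfs, hgvfs, requirements)
    try:
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        return True
    except IOError:
        return loaded


# An extension would install the wrapper from its extsetup(), e.g.:
#     extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrcextra)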


def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to lists of extensions to load automatically when
    # the requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')
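

# For example (illustrative): opening a repository whose ``.hg/requires``
# lists ``lfs`` behaves, thanks to the loop above, as if the user's config
# contained the following, unless the extension was configured explicitly:
#
#     [extensions]
#     lfs =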


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported
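

# Illustrative sketch: an extension advertises support for an extra
# requirement by registering a function in ``featuresetupfuncs``; it only
# runs when the extension's module is loaded for the current ui. The
# requirement name below is invented for the example.
def _examplefeaturesetup(ui, supported):
    """Example ``featuresetupfuncs`` entry adding a made-up requirement."""
    supported.add(b'exp-example-feature')


# An extension's uisetup() would register it with:
#     localrepo.featuresetupfuncs.add(_examplefeaturesetup)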


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns ``None`` when every requirement is recognized.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
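

# For example (illustrative): calling
#
#     ensurerequirementsrecognized({b'store', b'futurefeature'}, {b'store'})
#
# raises a RequirementError naming ``futurefeature`` and pointing at the
# MissingRequirement wiki page, while a fully recognized set passes silently.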


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(
                path, vfstype, b'dotencode' in requirements
            )

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)
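

# Summary of the selection above (illustrative):
#
#   requirements present         store class returned
#   ---------------------------  ------------------------
#   store + fncache              storemod.fncachestore
#   store only                   storemod.encodedstore
#   neither (very old repos)     storemod.basicstore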


def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options
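

# For example (illustrative, not exhaustive): for a default modern repository
# with ``revlogv1`` and ``generaldelta`` requirements and stock config, the
# dict returned above would include entries along the lines of
#
#     {b'revlogv1': True, b'generaldelta': True, ...}
#
# while a repository predating ``revlogv1`` is essentially reduced to
# {b'revlogv0': True}.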


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]
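
    # For example (illustrative): b'revlog-compression-zstd'.split(b'-', 2)
    # yields [b'revlog', b'compression', b'zstd'], so the engine recorded
    # above is b'zstd'; b'exp-compression-none' likewise yields b'none'.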

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if ui.configbool(b'experimental', b'exp-persistent-nodemap'):
        options[b'exp-persistent-nodemap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True
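
    # Illustrative note: ``devel.persistent-nodemap`` looks like a testing
    # knob; unlike the experimental switch above, it unconditionally forces
    # a nodemap-capable index so the persistent nodemap code paths can be
    # exercised.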

    return options


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
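

# Illustrative sketch: because the factories above are resolved through
# lambdas at call time, an extension can wrap them with the stock
# ``extensions.wrapfunction`` helper. The feature name below is invented.
def _examplemakefilestorage(orig, requirements, features, **kwargs):
    """Example wrapper observing file storage type selection."""
    features.add(b'example-extra-feature')
    return orig(requirements, features, **kwargs)


# Registered from an extension's uisetup() with:
#     extensions.wrapfunction(
#         localrepo, 'makefilestorage', _examplemakefilestorage)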


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    #  - manifestv2: An experimental new manifest format that allowed
    #    for stem compression of long paths. Experiment ended up not
    #    being successful (repository sizes went up due to worse delta
    #    chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        b'treemanifest',
        COPIESSDC_REQUIREMENT,
        REVLOGV2_REQUIREMENT,
        SIDEDATA_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        b'shared',
        b'relshared',
        b'dotencode',
        b'exp-sparse',
        b'internal-phase',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            if path.startswith(b'journal.') or path.startswith(b'undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
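
    # For example (illustrative): ``repo.filtered(b'served')`` yields a view
    # without secret or hidden changesets no matter how ``repo`` itself is
    # filtered, and calling ``filtered()`` on that view does not stack an
    # additional level of filtering.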

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light": the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
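
    # Illustrative sketch (not part of the original module): intersecting a
    # caller-supplied matcher with the narrowspec, assuming a repository
    # whose narrowspec includes only b'dir1':
    #
    #     m = matchmod.match(repo.root, b'', [b'path:dir1/f', b'path:dir2/f'])
    #     nm = repo.narrowmatch(m)
    #     nm(b'dir1/f')   # True
    #     nm(b'dir2/f')   # False; outside the narrowspec
    #
    # With includeexact=True, b'dir2/f' would also match, since exact files
    # from `m` are unioned with the narrow matcher before the intersection.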

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = {}
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick
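
    # Illustrative note (not part of the original module): for a working copy
    # whose first parent is rev 5 (node n5) with parent rev 4 (node n4), the
    # resulting mapping is roughly:
    #
    #     {5: (5, n5), n5: (5, n5), 4: (4, n4), n4: (4, n4), b'.': (5, n5)}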

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognize right away without
        further processing.
        """
        mapping = self._quick_access_changeid_null
        if self.filtername in repoview.filter_has_wc:
            mapping = mapping.copy()
            mapping.update(self._quick_access_changeid_wc)
        return mapping

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from a damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
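
    # Illustrative note (not part of the original module): the changeid forms
    # accepted above, assuming `repo` is an open localrepository and `node`
    # is a 20-byte binary nodeid:
    #
    #     repo[None]       # working directory context
    #     repo[0]          # integer revision number
    #     repo[b'.']       # first parent of the working directory
    #     repo[b'tip']     # tip of the repository
    #     repo[node]       # binary nodeid (len == 20)
    #     repo[hex(node)]  # hex nodeid (len == 40)
    #     repo[0:2]        # slice -> list of changectx, skipping filtered revs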

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]
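
    # Illustrative sketch (not part of the original module): %-formatting
    # escapes values safely instead of interpolating them into the revset
    # source by hand (see ``revsetlang.formatspec``):
    #
    #     repo.revs(b'%d::', 42)                # %d: an integer revision
    #     repo.revs(b'branch(%s)', b'default')  # %s: a bytes string
    #     for ctx in repo.set(b'draft()'):      # same machinery, changectx
    #         pass                              # instances instead of ints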

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
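
    # Illustrative sketch (not part of the original module): expanding user
    # aliases while overriding one locally; the alias name and definition
    # are hypothetical:
    #
    #     repo.anyrevs(
    #         [b'mine()'], user=True, localalias={b'mine': b'author(alice)'}
    #     )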

    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
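
    # Illustrative note (not part of the original module): keyword arguments
    # are forwarded to the hook machinery, which exposes them to shell hooks
    # as HG_* environment variables; e.g. a hypothetical call:
    #
    #     repo.hook(b'myhook', throw=False, node=hex(node))
    #
    # would expose $HG_NODE to a shell hook registered under "myhook".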

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, b'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = dict(
            [
                (encoding.tolocal(name), value)
                for (name, value) in pycompat.iteritems(tagtypes)
            ]
        )
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
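
    # Illustrative note (not part of the original module): the patterns loaded
    # above come from [encode]/[decode] hgrc sections, for example:
    #
    #     [encode]
    #     *.txt = tempfile: unix2dos -n INFILE OUTFILE
    #
    #     [decode]
    #     *.txt = dos2unix
    #
    # A command of b'!' disables the pattern; a command starting with a name
    # registered through adddatafilter() dispatches to that in-process filter
    # instead of piping the data through a shell command.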

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)
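
    # Illustrative note (not part of the original module): ``flags`` selects
    # how the data materializes in the working directory:
    #
    #     repo.wwrite(b'script.sh', data, b'x')  # regular file, executable
    #     repo.wwrite(b'link', b'target', b'l')  # symlink to 'target'
    #     repo.wwrite(b'plain.txt', data, b'')   # regular, non-executable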

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes with
        # performance impacts. The current code runs more often than needed and
        # does not use caches as much as it could. The current focus is on the
        # behavior of the feature, so we disable it by default. The flag will
        # be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        # <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        # "-R": tag is removed,
        # "+A": tag is added,
        # "-M": tag is moved (old value),
        # "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building sets would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes[b'phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook(
                        b'pretxnclose-phase',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )
2191
2193
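        # The pretxnclose* hooks above give site tooling a veto over the
        # whole transaction. A minimal sketch of wiring one up from a
        # repository's hgrc; the hook and module names are illustrative,
        # not part of this file:
        #
        #   [hooks]
        #   pretxnclose-bookmark.keep = python:myhooks.forbid_bookmark_removal
        #
        #   # myhooks.py (assuming hg's documented python hook signature;
        #   # the node argument is empty when a bookmark is being deleted)
        #   def forbid_bookmark_removal(ui, repo, hooktype, **kwargs):
        #       # a true return value fails the hook, and throw=True above
        #       # turns that failure into a transaction abort
        #       return not kwargs.get('node')
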
        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # This must be invoked explicitly here: in-memory changes
                # aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running.
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = {}
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes[b'phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'txnclose-phase',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

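    # Callers typically pair transaction() with the store lock and use the
    # returned object as a context manager, as commitctx() further down
    # does. A minimal sketch:
    #
    #   with repo.lock(), repo.transaction(b'my-change') as tr:
    #       ...  # mutate the store; on normal exit the transaction is
    #            # closed and the pretxnclose/txnclose hooks above fire
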
    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

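    # undoname() maps each journal file to its post-transaction counterpart,
    # so the backups listed above become e.g. 'undo.dirstate' and
    # 'undo.phaseroots' once the transaction closes; _rollback() below reads
    # exactly those 'undo.*' files.
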
    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

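    # This is the code path behind `hg recover`: it replays the 'journal'
    # backups left behind by an interrupted transaction, which is what the
    # releasefn() comment in transaction() points users at.
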
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these
            # caches to be warmed up even if they haven't explicitly been
            # requested yet (if they've never been used by hg, they won't ever
            # have been written, even if they're a subset of another kind of
            # cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

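    # A sketch of exercising the full=True path by hand; the debug command
    # name is from hg's debug command set and may vary by version:
    #
    #   $ hg debugupdatecaches
    #
    # or, programmatically:
    #
    #   repo.updatecaches(full=True)
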
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

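    # Note the fallback above: when no lock is currently held, the callback
    # runs immediately and is passed True, mirroring the success flag that
    # lock.postrelease callbacks receive. commit() below relies on this when
    # it registers commithook().
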
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such an
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l

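    # Correct ordering in client code, per the docstrings above (a sketch;
    # the transaction name is illustrative):
    #
    #   with repo.wlock(), repo.lock():  # wlock strictly first
    #       with repo.transaction(b'example') as tr:
    #           ...
    #
    # Acquiring them in the other order trips the '"wlock" acquired after
    # "lock"' devel warning above when check-locks is enabled.
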
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(
        self,
        fctx,
        manifest1,
        manifest2,
        linkrev,
        tr,
        changelist,
        includecopymeta,
    ):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug(b'reusing %s filelog entry\n' % fname)
                if (
                    fparent1 != nullid
                    and manifest1.flags(fname) != fctx.flags()
                ) or (
                    fparent2 != nullid
                    and manifest2.flags(fname) != fctx.flags()
                ):
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2:  # branch merge
                if fparent2 == nullid or cnode is None:  # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if cnode:
                self.ui.debug(
                    b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
                )
                if includecopymeta:
                    meta[b"copy"] = cfname
                    meta[b"copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(
                    _(
                        b"warning: can't find ancestor for '%s' "
                        b"copied from '%s'!\n"
                    )
                    % (fname, cfname)
                )

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

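    # For reference, the copy metadata written above ends up in the filelog
    # entry with this shape (values illustrative):
    #
    #   meta = {
    #       b"copy": b"foo",          # source path of the rename/copy
    #       b"copyrev": b"<40-hex>",  # filelog node of the copy source
    #   }
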
    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            # internal config: ui.allowemptycommit
            allowemptycommit = (
                wctx.branch() != wctx.p1().branch()
                or extra.get(b'close')
                or merge
                or cctx.files()
                or self.ui.configbool(b'ui', b'allowemptycommit')
            )
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may have been stripped before the hook
            # runs
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

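    # A minimal caller sketch (the message and user values are illustrative):
    #
    #   node = repo.commit(
    #       text=b'fix the frobnicator',
    #       user=b'Jane Doe <jane@example.com>',
    #   )
    #   if node is None:
    #       ...  # nothing to commit (see allowemptycommit above)
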
3012 @unfilteredmethod
3014 @unfilteredmethod
3013 def commitctx(self, ctx, error=False, origctx=None):
3015 def commitctx(self, ctx, error=False, origctx=None):
3014 """Add a new revision to current repository.
3016 """Add a new revision to current repository.
3015 Revision information is passed via the context argument.
3017 Revision information is passed via the context argument.
3016
3018
3017 ctx.files() should list all files involved in this commit, i.e.
3019 ctx.files() should list all files involved in this commit, i.e.
3018 modified/added/removed files. On merge, it may be wider than the
3020 modified/added/removed files. On merge, it may be wider than the
3019 ctx.files() to be committed, since any file nodes derived directly
3021 ctx.files() to be committed, since any file nodes derived directly
3020 from p1 or p2 are excluded from the committed ctx.files().
3022 from p1 or p2 are excluded from the committed ctx.files().
3021
3023
3022 origctx is for convert to work around the problem that bug
3024 origctx is for convert to work around the problem that bug
3023 fixes to the files list in changesets change hashes. For
3025 fixes to the files list in changesets change hashes. For
3024 convert to be the identity, it can pass an origctx and this
3026 convert to be the identity, it can pass an origctx and this
3025 function will use the same files list when it makes sense to
3027 function will use the same files list when it makes sense to
3026 do so.
3028 do so.
3027 """
3029 """
3028
3030
3029 p1, p2 = ctx.p1(), ctx.p2()
3031 p1, p2 = ctx.p1(), ctx.p2()
3030 user = ctx.user()
3032 user = ctx.user()
3031
3033
3032 if self.filecopiesmode == b'changeset-sidedata':
3034 if self.filecopiesmode == b'changeset-sidedata':
3033 writechangesetcopy = True
3035 writechangesetcopy = True
3034 writefilecopymeta = True
3036 writefilecopymeta = True
3035 writecopiesto = None
3037 writecopiesto = None
3036 else:
3038 else:
3037 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
3039 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
3038 writefilecopymeta = writecopiesto != b'changeset-only'
3040 writefilecopymeta = writecopiesto != b'changeset-only'
3039 writechangesetcopy = writecopiesto in (
3041 writechangesetcopy = writecopiesto in (
3040 b'changeset-only',
3042 b'changeset-only',
3041 b'compatibility',
3043 b'compatibility',
3042 )
3044 )
3043 p1copies, p2copies = None, None
3045 p1copies, p2copies = None, None
3044 if writechangesetcopy:
3046 if writechangesetcopy:
3045 p1copies = ctx.p1copies()
3047 p1copies = ctx.p1copies()
3046 p2copies = ctx.p2copies()
3048 p2copies = ctx.p2copies()
3047 filesadded, filesremoved = None, None
3049 filesadded, filesremoved = None, None
3048 with self.lock(), self.transaction(b"commit") as tr:
3050 with self.lock(), self.transaction(b"commit") as tr:
3049 trp = weakref.proxy(tr)
3051 trp = weakref.proxy(tr)
3050
3052
3051 if ctx.manifestnode():
3053 if ctx.manifestnode():
3052 # reuse an existing manifest revision
3054 # reuse an existing manifest revision
3053 self.ui.debug(b'reusing known manifest\n')
3055 self.ui.debug(b'reusing known manifest\n')
3054 mn = ctx.manifestnode()
3056 mn = ctx.manifestnode()
3055 files = ctx.files()
3057 files = ctx.files()
3056 if writechangesetcopy:
3058 if writechangesetcopy:
3057 filesadded = ctx.filesadded()
3059 filesadded = ctx.filesadded()
3058 filesremoved = ctx.filesremoved()
3060 filesremoved = ctx.filesremoved()
3059 elif ctx.files():
3061 elif ctx.files():
3060 m1ctx = p1.manifestctx()
3062 m1ctx = p1.manifestctx()
3061 m2ctx = p2.manifestctx()
3063 m2ctx = p2.manifestctx()
3062 mctx = m1ctx.copy()
3064 mctx = m1ctx.copy()
3063
3065
3064 m = mctx.read()
3066 m = mctx.read()
3065 m1 = m1ctx.read()
3067 m1 = m1ctx.read()
3066 m2 = m2ctx.read()
3068 m2 = m2ctx.read()
3067
3069
3068 # check in files
3070 # check in files
3069 added = []
3071 added = []
3070 changed = []
3072 changed = []
3071 removed = list(ctx.removed())
3073 removed = list(ctx.removed())
3072 linkrev = len(self)
3074 linkrev = len(self)
3073 self.ui.note(_(b"committing files:\n"))
3075 self.ui.note(_(b"committing files:\n"))
3074 uipathfn = scmutil.getuipathfn(self)
3076 uipathfn = scmutil.getuipathfn(self)
3075 for f in sorted(ctx.modified() + ctx.added()):
3077 for f in sorted(ctx.modified() + ctx.added()):
3076 self.ui.note(uipathfn(f) + b"\n")
3078 self.ui.note(uipathfn(f) + b"\n")
3077 try:
3079 try:
3078 fctx = ctx[f]
3080 fctx = ctx[f]
3079 if fctx is None:
3081 if fctx is None:
3080 removed.append(f)
3082 removed.append(f)
3081 else:
3083 else:
3082 added.append(f)
3084 added.append(f)
3083 m[f] = self._filecommit(
3085 m[f] = self._filecommit(
3084 fctx,
3086 fctx,
3085 m1,
3087 m1,
3086 m2,
3088 m2,
3087 linkrev,
3089 linkrev,
3088 trp,
3090 trp,
3089 changed,
3091 changed,
3090 writefilecopymeta,
3092 writefilecopymeta,
3091 )
3093 )
3092 m.setflag(f, fctx.flags())
3094 m.setflag(f, fctx.flags())
3093 except OSError:
3095 except OSError:
3094 self.ui.warn(
3096 self.ui.warn(
3095 _(b"trouble committing %s!\n") % uipathfn(f)
3097 _(b"trouble committing %s!\n") % uipathfn(f)
3096 )
3098 )
3097 raise
3099 raise
3098 except IOError as inst:
3100 except IOError as inst:
3099 errcode = getattr(inst, 'errno', errno.ENOENT)
3101 errcode = getattr(inst, 'errno', errno.ENOENT)
3100 if errcode and errcode != errno.ENOENT:
3102 if errcode and errcode != errno.ENOENT:
3101 self.ui.warn(
3103 self.ui.warn(
3102 _(b"trouble committing %s!\n") % uipathfn(f)
3104 _(b"trouble committing %s!\n") % uipathfn(f)
3103 )
3105 )
3104 raise
3106 raise
3105
3107
3106 # update manifest
3108 # update manifest
3107 removed = [f for f in removed if f in m1 or f in m2]
3109 removed = [f for f in removed if f in m1 or f in m2]
3108 drop = sorted([f for f in removed if f in m])
3110 drop = sorted([f for f in removed if f in m])
3109 for f in drop:
3111 for f in drop:
3110 del m[f]
3112 del m[f]
3111 if p2.rev() != nullrev:
3113 if p2.rev() != nullrev:
3112
3114
3113 @util.cachefunc
3115 @util.cachefunc
3114 def mas():
3116 def mas():
3115 p1n = p1.node()
3117 p1n = p1.node()
3116 p2n = p2.node()
3118 p2n = p2.node()
3117 cahs = self.changelog.commonancestorsheads(p1n, p2n)
3119 cahs = self.changelog.commonancestorsheads(p1n, p2n)
3118 if not cahs:
3120 if not cahs:
3119 cahs = [nullrev]
3121 cahs = [nullrev]
3120 return [self[r].manifest() for r in cahs]
3122 return [self[r].manifest() for r in cahs]
3121
3123
3122 def deletionfromparent(f):
3124 def deletionfromparent(f):
3123 # When a file is removed relative to p1 in a merge, this
3125 # When a file is removed relative to p1 in a merge, this
3124 # function determines whether the absence is due to a
3126 # function determines whether the absence is due to a
3125 # deletion from a parent, or whether the merge commit
3127 # deletion from a parent, or whether the merge commit
3126 # itself deletes the file. We decide this by doing a
3128 # itself deletes the file. We decide this by doing a
3127 # simplified three way merge of the manifest entry for
3129 # simplified three way merge of the manifest entry for
3128 # the file. There are two ways we decide the merge
3130 # the file. There are two ways we decide the merge
3129 # itself didn't delete a file:
3131 # itself didn't delete a file:
3130 # - neither parent (nor the merge) contain the file
3132 # - neither parent (nor the merge) contain the file
3131 # - exactly one parent contains the file, and that
3133 # - exactly one parent contains the file, and that
3132 # parent has the same filelog entry as the merge
3134 # parent has the same filelog entry as the merge
3133 # ancestor (or all of them if there are two). In other
3135 # ancestor (or all of them if there are two). In other
3134 # words, that parent left the file unchanged while the
3136 # words, that parent left the file unchanged while the
3135 # other one deleted it.
3137 # other one deleted it.
3136 # One way to think about this is that deleting a file is
3138 # One way to think about this is that deleting a file is
3137 # similar to emptying it, so the list of changed files
3139 # similar to emptying it, so the list of changed files
3138 # should be similar either way. The computation
3140 # should be similar either way. The computation
3139 # described above is not done directly in _filecommit
3141 # described above is not done directly in _filecommit
3140 # when creating the list of changed files, however
3142 # when creating the list of changed files, however
3141 # it does something very similar by comparing filelog
3143 # it does something very similar by comparing filelog
3142 # nodes.
3144 # nodes.
3143 if f in m1:
3145 if f in m1:
3144 return f not in m2 and all(
3146 return f not in m2 and all(
3145 f in ma and ma.find(f) == m1.find(f)
3147 f in ma and ma.find(f) == m1.find(f)
3146 for ma in mas()
3148 for ma in mas()
3147 )
3149 )
3148 elif f in m2:
3150 elif f in m2:
3149 return all(
3151 return all(
3150 f in ma and ma.find(f) == m2.find(f)
3152 f in ma and ma.find(f) == m2.find(f)
3151 for ma in mas()
3153 for ma in mas()
3152 )
3154 )
3153 else:
3155 else:
3154 return True
3156 return True
3155
3157
3156 removed = [f for f in removed if not deletionfromparent(f)]
3158 removed = [f for f in removed if not deletionfromparent(f)]
3157
3159
3158 files = changed + removed
3160 files = changed + removed
3159 md = None
3161 md = None
3160 if not files:
3162 if not files:
3161 # if no "files" actually changed in terms of the changelog,
3163 # if no "files" actually changed in terms of the changelog,
3162 # try hard to detect unmodified manifest entries so that the
3164 # try hard to detect unmodified manifest entries so that the
3163 # exact same commit can be reproduced later on convert.
3165 # exact same commit can be reproduced later on convert.
3164 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3166 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
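# (added note) m1.diff(m, match) is expected to yield a dict of
# {path: ((node1, flag1), (node2, flag2))} for differing entries;
# restricting it to ctx.files() keeps the probe cheap on big manifests.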
3165 if not files and md:
3167 if not files and md:
3166 self.ui.debug(
3168 self.ui.debug(
3167 b'not reusing manifest (no file change in '
3169 b'not reusing manifest (no file change in '
3168 b'changelog, but manifest differs)\n'
3170 b'changelog, but manifest differs)\n'
3169 )
3171 )
3170 if files or md:
3172 if files or md:
3171 self.ui.note(_(b"committing manifest\n"))
3173 self.ui.note(_(b"committing manifest\n"))
3172 # we're using narrowmatch here since it's already applied at
3174 # we're using narrowmatch here since it's already applied at
3173 # other stages (such as dirstate.walk), so we're already
3175 # other stages (such as dirstate.walk), so we're already
3174 # ignoring things outside of narrowspec in most cases. The
3176 # ignoring things outside of narrowspec in most cases. The
3175 # one case where we might have files outside the narrowspec
3177 # one case where we might have files outside the narrowspec
3176 # at this point is merges, and we already error out in the
3178 # at this point is merges, and we already error out in the
3177 # case where the merge has files outside of the narrowspec,
3179 # case where the merge has files outside of the narrowspec,
3178 # so this is safe.
3180 # so this is safe.
3179 mn = mctx.write(
3181 mn = mctx.write(
3180 trp,
3182 trp,
3181 linkrev,
3183 linkrev,
3182 p1.manifestnode(),
3184 p1.manifestnode(),
3183 p2.manifestnode(),
3185 p2.manifestnode(),
3184 added,
3186 added,
3185 drop,
3187 drop,
3186 match=self.narrowmatch(),
3188 match=self.narrowmatch(),
3187 )
3189 )
3188
3190
3189 if writechangesetcopy:
3191 if writechangesetcopy:
3190 filesadded = [
3192 filesadded = [
3191 f for f in changed if not (f in m1 or f in m2)
3193 f for f in changed if not (f in m1 or f in m2)
3192 ]
3194 ]
3193 filesremoved = removed
3195 filesremoved = removed
3194 else:
3196 else:
3195 self.ui.debug(
3197 self.ui.debug(
3196 b'reusing manifest from p1 (listed files '
3198 b'reusing manifest from p1 (listed files '
3197 b'actually unchanged)\n'
3199 b'actually unchanged)\n'
3198 )
3200 )
3199 mn = p1.manifestnode()
3201 mn = p1.manifestnode()
3200 else:
3202 else:
3201 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3203 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3202 mn = p1.manifestnode()
3204 mn = p1.manifestnode()
3203 files = []
3205 files = []
3204
3206
3205 if writecopiesto == b'changeset-only':
3207 if writecopiesto == b'changeset-only':
3206 # If writing only to changeset extras, use None to indicate that
3208 # If writing only to changeset extras, use None to indicate that
3207 # no entry should be written. If writing to both, write an empty
3209 # no entry should be written. If writing to both, write an empty
3208 # entry to prevent the reader from falling back to reading
3210 # entry to prevent the reader from falling back to reading
3209 # filelogs.
3211 # filelogs.
3210 p1copies = p1copies or None
3212 p1copies = p1copies or None
3211 p2copies = p2copies or None
3213 p2copies = p2copies or None
3212 filesadded = filesadded or None
3214 filesadded = filesadded or None
3213 filesremoved = filesremoved or None
3215 filesremoved = filesremoved or None
3214
3216
3215 if origctx and origctx.manifestnode() == mn:
3217 if origctx and origctx.manifestnode() == mn:
3216 files = origctx.files()
3218 files = origctx.files()
3217
3219
3218 # update changelog
3220 # update changelog
3219 self.ui.note(_(b"committing changelog\n"))
3221 self.ui.note(_(b"committing changelog\n"))
3220 self.changelog.delayupdate(tr)
3222 self.changelog.delayupdate(tr)
3221 n = self.changelog.add(
3223 n = self.changelog.add(
3222 mn,
3224 mn,
3223 files,
3225 files,
3224 ctx.description(),
3226 ctx.description(),
3225 trp,
3227 trp,
3226 p1.node(),
3228 p1.node(),
3227 p2.node(),
3229 p2.node(),
3228 user,
3230 user,
3229 ctx.date(),
3231 ctx.date(),
3230 ctx.extra().copy(),
3232 ctx.extra().copy(),
3231 p1copies,
3233 p1copies,
3232 p2copies,
3234 p2copies,
3233 filesadded,
3235 filesadded,
3234 filesremoved,
3236 filesremoved,
3235 )
3237 )
3236 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3238 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3237 self.hook(
3239 self.hook(
3238 b'pretxncommit',
3240 b'pretxncommit',
3239 throw=True,
3241 throw=True,
3240 node=hex(n),
3242 node=hex(n),
3241 parent1=xp1,
3243 parent1=xp1,
3242 parent2=xp2,
3244 parent2=xp2,
3243 )
3245 )
3244 # set the new commit in the proper phase
3246 # set the new commit in the proper phase
3245 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3247 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3246 if targetphase:
3248 if targetphase:
3247 # retracting the boundary does not alter parent changesets.
3249 # retracting the boundary does not alter parent changesets.
3248 # if a parent has a higher phase, the resulting phase will
3250 # if a parent has a higher phase, the resulting phase will
3249 # be compliant anyway
3251 # be compliant anyway
3250 #
3252 #
3251 # if minimal phase was 0 we don't need to retract anything
3253 # if minimal phase was 0 we don't need to retract anything
3252 phases.registernew(self, tr, targetphase, [n])
3254 phases.registernew(self, tr, targetphase, [n])
3253 return n
3255 return n
3254
3256
3255 @unfilteredmethod
3257 @unfilteredmethod
3256 def destroying(self):
3258 def destroying(self):
3257 '''Inform the repository that nodes are about to be destroyed.
3259 '''Inform the repository that nodes are about to be destroyed.
3258 Intended for use by strip and rollback, so there's a common
3260 Intended for use by strip and rollback, so there's a common
3259 place for anything that has to be done before destroying history.
3261 place for anything that has to be done before destroying history.
3260
3262
3261 This is mostly useful for saving state that is in memory and waiting
3263 This is mostly useful for saving state that is in memory and waiting
3262 to be flushed when the current lock is released. Because a call to
3264 to be flushed when the current lock is released. Because a call to
3263 destroyed is imminent, the repo will be invalidated causing those
3265 destroyed is imminent, the repo will be invalidated causing those
3264 changes to stay in memory (waiting for the next unlock), or vanish
3266 changes to stay in memory (waiting for the next unlock), or vanish
3265 completely.
3267 completely.
3266 '''
3268 '''
3267 # When using the same lock to commit and strip, the phasecache is left
3269 # When using the same lock to commit and strip, the phasecache is left
3268 # dirty after committing. Then when we strip, the repo is invalidated,
3270 # dirty after committing. Then when we strip, the repo is invalidated,
3269 # causing those changes to disappear.
3271 # causing those changes to disappear.
3270 if '_phasecache' in vars(self):
3272 if '_phasecache' in vars(self):
3271 self._phasecache.write()
3273 self._phasecache.write()
3272
3274
3273 @unfilteredmethod
3275 @unfilteredmethod
3274 def destroyed(self):
3276 def destroyed(self):
3275 '''Inform the repository that nodes have been destroyed.
3277 '''Inform the repository that nodes have been destroyed.
3276 Intended for use by strip and rollback, so there's a common
3278 Intended for use by strip and rollback, so there's a common
3277 place for anything that has to be done after destroying history.
3279 place for anything that has to be done after destroying history.
3278 '''
3280 '''
3279 # When one tries to:
3281 # When one tries to:
3280 # 1) destroy nodes thus calling this method (e.g. strip)
3282 # 1) destroy nodes thus calling this method (e.g. strip)
3281 # 2) use phasecache somewhere (e.g. commit)
3283 # 2) use phasecache somewhere (e.g. commit)
3282 #
3284 #
3283 # then 2) will fail because the phasecache contains nodes that were
3285 # then 2) will fail because the phasecache contains nodes that were
3284 # removed. We can either remove phasecache from the filecache,
3286 # removed. We can either remove phasecache from the filecache,
3285 # causing it to reload next time it is accessed, or simply filter
3287 # causing it to reload next time it is accessed, or simply filter
3286 # the removed nodes now and write the updated cache.
3288 # the removed nodes now and write the updated cache.
3287 self._phasecache.filterunknown(self)
3289 self._phasecache.filterunknown(self)
3288 self._phasecache.write()
3290 self._phasecache.write()
3289
3291
3290 # refresh all repository caches
3292 # refresh all repository caches
3291 self.updatecaches()
3293 self.updatecaches()
3292
3294
3293 # Ensure the persistent tag cache is updated. Doing it now
3295 # Ensure the persistent tag cache is updated. Doing it now
3294 # means that the tag cache only has to worry about destroyed
3296 # means that the tag cache only has to worry about destroyed
3295 # heads immediately after a strip/rollback. That in turn
3297 # heads immediately after a strip/rollback. That in turn
3296 # guarantees that "cachetip == currenttip" (comparing both rev
3298 # guarantees that "cachetip == currenttip" (comparing both rev
3297 # and node) always means no nodes have been added or destroyed.
3299 # and node) always means no nodes have been added or destroyed.
3298
3300
3299 # XXX this is suboptimal when qrefresh'ing: we strip the current
3301 # XXX this is suboptimal when qrefresh'ing: we strip the current
3300 # head, refresh the tag cache, then immediately add a new head.
3302 # head, refresh the tag cache, then immediately add a new head.
3301 # But I think doing it this way is necessary for the "instant
3303 # But I think doing it this way is necessary for the "instant
3302 # tag cache retrieval" case to work.
3304 # tag cache retrieval" case to work.
3303 self.invalidate()
3305 self.invalidate()
3304
3306
3305 def status(
3307 def status(
3306 self,
3308 self,
3307 node1=b'.',
3309 node1=b'.',
3308 node2=None,
3310 node2=None,
3309 match=None,
3311 match=None,
3310 ignored=False,
3312 ignored=False,
3311 clean=False,
3313 clean=False,
3312 unknown=False,
3314 unknown=False,
3313 listsubrepos=False,
3315 listsubrepos=False,
3314 ):
3316 ):
3315 '''a convenience method that calls node1.status(node2)'''
3317 '''a convenience method that calls node1.status(node2)'''
3316 return self[node1].status(
3318 return self[node1].status(
3317 node2, match, ignored, clean, unknown, listsubrepos
3319 node2, match, ignored, clean, unknown, listsubrepos
3318 )
3320 )
3319
3321
3320 def addpostdsstatus(self, ps):
3322 def addpostdsstatus(self, ps):
3321 """Add a callback to run within the wlock, at the point at which status
3323 """Add a callback to run within the wlock, at the point at which status
3322 fixups happen.
3324 fixups happen.
3323
3325
3324 On status completion, callback(wctx, status) will be called with the
3326 On status completion, callback(wctx, status) will be called with the
3325 wlock held, unless the dirstate has changed from underneath or the wlock
3327 wlock held, unless the dirstate has changed from underneath or the wlock
3326 couldn't be grabbed.
3328 couldn't be grabbed.
3327
3329
3328 Callbacks should not capture and use a cached copy of the dirstate --
3330 Callbacks should not capture and use a cached copy of the dirstate --
3329 it might change in the meantime. Instead, they should access the
3331 it might change in the meantime. Instead, they should access the
3330 dirstate via wctx.repo().dirstate.
3332 dirstate via wctx.repo().dirstate.
3331
3333
3332 This list is emptied out after each status run -- extensions should
3334 This list is emptied out after each status run -- extensions should
3333 re-add their callbacks each time dirstate.status is called.
3335 re-add their callbacks each time dirstate.status is called.
3334 Extensions should also make sure they don't call this for statuses
3336 Extensions should also make sure they don't call this for statuses
3335 that don't involve the dirstate.
3337 that don't involve the dirstate.
3336 """
3338 """
3337
3339
3338 # The list is located here for uniqueness reasons -- it is actually
3340 # The list is located here for uniqueness reasons -- it is actually
3339 # managed by the workingctx, but that isn't unique per-repo.
3341 # managed by the workingctx, but that isn't unique per-repo.
3340 self._postdsstatus.append(ps)
3342 self._postdsstatus.append(ps)
3341
3343
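# Illustrative usage sketch (hypothetical extension code, not part of
# this module): registering a fixup that runs under the wlock after
# status, reading the dirstate only through wctx.repo().dirstate.
#
#     def fixup(wctx, status):
#         dirstate = wctx.repo().dirstate  # never use a cached copy
#         ...
#
#     repo.addpostdsstatus(fixup)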
3342 def postdsstatus(self):
3344 def postdsstatus(self):
3343 """Used by workingctx to get the list of post-dirstate-status hooks."""
3345 """Used by workingctx to get the list of post-dirstate-status hooks."""
3344 return self._postdsstatus
3346 return self._postdsstatus
3345
3347
3346 def clearpostdsstatus(self):
3348 def clearpostdsstatus(self):
3347 """Used by workingctx to clear post-dirstate-status hooks."""
3349 """Used by workingctx to clear post-dirstate-status hooks."""
3348 del self._postdsstatus[:]
3350 del self._postdsstatus[:]
3349
3351
3350 def heads(self, start=None):
3352 def heads(self, start=None):
3351 if start is None:
3353 if start is None:
3352 cl = self.changelog
3354 cl = self.changelog
3353 headrevs = reversed(cl.headrevs())
3355 headrevs = reversed(cl.headrevs())
3354 return [cl.node(rev) for rev in headrevs]
3356 return [cl.node(rev) for rev in headrevs]
3355
3357
3356 heads = self.changelog.heads(start)
3358 heads = self.changelog.heads(start)
3357 # sort the output in rev descending order
3359 # sort the output in rev descending order
3358 return sorted(heads, key=self.changelog.rev, reverse=True)
3360 return sorted(heads, key=self.changelog.rev, reverse=True)
3359
3361
3360 def branchheads(self, branch=None, start=None, closed=False):
3362 def branchheads(self, branch=None, start=None, closed=False):
3361 '''return a (possibly filtered) list of heads for the given branch
3363 '''return a (possibly filtered) list of heads for the given branch
3362
3364
3363 Heads are returned in topological order, from newest to oldest.
3365 Heads are returned in topological order, from newest to oldest.
3364 If branch is None, use the dirstate branch.
3366 If branch is None, use the dirstate branch.
3365 If start is not None, return only heads reachable from start.
3367 If start is not None, return only heads reachable from start.
3366 If closed is True, return heads that are marked as closed as well.
3368 If closed is True, return heads that are marked as closed as well.
3367 '''
3369 '''
3368 if branch is None:
3370 if branch is None:
3369 branch = self[None].branch()
3371 branch = self[None].branch()
3370 branches = self.branchmap()
3372 branches = self.branchmap()
3371 if not branches.hasbranch(branch):
3373 if not branches.hasbranch(branch):
3372 return []
3374 return []
3373 # the cache returns heads ordered lowest to highest
3375 # the cache returns heads ordered lowest to highest
3374 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3376 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3375 if start is not None:
3377 if start is not None:
3376 # filter out the heads that cannot be reached from startrev
3378 # filter out the heads that cannot be reached from startrev
3377 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3379 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3378 bheads = [h for h in bheads if h in fbheads]
3380 bheads = [h for h in bheads if h in fbheads]
3379 return bheads
3381 return bheads
3380
3382
3381 def branches(self, nodes):
3383 def branches(self, nodes):
3382 if not nodes:
3384 if not nodes:
3383 nodes = [self.changelog.tip()]
3385 nodes = [self.changelog.tip()]
3384 b = []
3386 b = []
3385 for n in nodes:
3387 for n in nodes:
3386 t = n
3388 t = n
3387 while True:
3389 while True:
3388 p = self.changelog.parents(n)
3390 p = self.changelog.parents(n)
3389 if p[1] != nullid or p[0] == nullid:
3391 if p[1] != nullid or p[0] == nullid:
3390 b.append((t, n, p[0], p[1]))
3392 b.append((t, n, p[0], p[1]))
3391 break
3393 break
3392 n = p[0]
3394 n = p[0]
3393 return b
3395 return b
3394
3396
3395 def between(self, pairs):
3397 def between(self, pairs):
3396 r = []
3398 r = []
3397
3399
3398 for top, bottom in pairs:
3400 for top, bottom in pairs:
3399 n, l, i = top, [], 0
3401 n, l, i = top, [], 0
3400 f = 1
3402 f = 1
3401
3403
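# (added note) the loop below samples the first-parent chain at
# exponentially growing distances (1, 2, 4, 8, ...) from `top` toward
# `bottom`, which is what the legacy wire protocol `between` command
# expects.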
3402 while n != bottom and n != nullid:
3404 while n != bottom and n != nullid:
3403 p = self.changelog.parents(n)[0]
3405 p = self.changelog.parents(n)[0]
3404 if i == f:
3406 if i == f:
3405 l.append(n)
3407 l.append(n)
3406 f = f * 2
3408 f = f * 2
3407 n = p
3409 n = p
3408 i += 1
3410 i += 1
3409
3411
3410 r.append(l)
3412 r.append(l)
3411
3413
3412 return r
3414 return r
3413
3415
3414 def checkpush(self, pushop):
3416 def checkpush(self, pushop):
3415 """Extensions can override this function if additional checks have
3417 """Extensions can override this function if additional checks have
3416 to be performed before pushing, or call it if they override push
3418 to be performed before pushing, or call it if they override push
3417 command.
3419 command.
3418 """
3420 """
3419
3421
3420 @unfilteredpropertycache
3422 @unfilteredpropertycache
3421 def prepushoutgoinghooks(self):
3423 def prepushoutgoinghooks(self):
3422 """Return util.hooks consists of a pushop with repo, remote, outgoing
3424 """Return util.hooks consists of a pushop with repo, remote, outgoing
3423 methods, which are called before pushing changesets.
3425 methods, which are called before pushing changesets.
3424 """
3426 """
3425 return util.hooks()
3427 return util.hooks()
3426
3428
3427 def pushkey(self, namespace, key, old, new):
3429 def pushkey(self, namespace, key, old, new):
3428 try:
3430 try:
3429 tr = self.currenttransaction()
3431 tr = self.currenttransaction()
3430 hookargs = {}
3432 hookargs = {}
3431 if tr is not None:
3433 if tr is not None:
3432 hookargs.update(tr.hookargs)
3434 hookargs.update(tr.hookargs)
3433 hookargs = pycompat.strkwargs(hookargs)
3435 hookargs = pycompat.strkwargs(hookargs)
3434 hookargs['namespace'] = namespace
3436 hookargs['namespace'] = namespace
3435 hookargs['key'] = key
3437 hookargs['key'] = key
3436 hookargs['old'] = old
3438 hookargs['old'] = old
3437 hookargs['new'] = new
3439 hookargs['new'] = new
3438 self.hook(b'prepushkey', throw=True, **hookargs)
3440 self.hook(b'prepushkey', throw=True, **hookargs)
3439 except error.HookAbort as exc:
3441 except error.HookAbort as exc:
3440 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3442 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3441 if exc.hint:
3443 if exc.hint:
3442 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3444 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3443 return False
3445 return False
3444 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3446 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3445 ret = pushkey.push(self, namespace, key, old, new)
3447 ret = pushkey.push(self, namespace, key, old, new)
3446
3448
3447 def runhook(unused_success):
3449 def runhook(unused_success):
3448 self.hook(
3450 self.hook(
3449 b'pushkey',
3451 b'pushkey',
3450 namespace=namespace,
3452 namespace=namespace,
3451 key=key,
3453 key=key,
3452 old=old,
3454 old=old,
3453 new=new,
3455 new=new,
3454 ret=ret,
3456 ret=ret,
3455 )
3457 )
3456
3458
3457 self._afterlock(runhook)
3459 self._afterlock(runhook)
3458 return ret
3460 return ret
3459
3461
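# (added note) common pushkey namespaces include b'phases',
# b'bookmarks', and b'obsolete'; e.g. a bookmark move is pushed as
#     repo.pushkey(b'bookmarks', b'@', oldhex, newhex)
# where old/new are the prior and desired values, encoded per namespace.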
3460 def listkeys(self, namespace):
3462 def listkeys(self, namespace):
3461 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3463 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3462 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3464 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3463 values = pushkey.list(self, namespace)
3465 values = pushkey.list(self, namespace)
3464 self.hook(b'listkeys', namespace=namespace, values=values)
3466 self.hook(b'listkeys', namespace=namespace, values=values)
3465 return values
3467 return values
3466
3468
3467 def debugwireargs(self, one, two, three=None, four=None, five=None):
3469 def debugwireargs(self, one, two, three=None, four=None, five=None):
3468 '''used to test argument passing over the wire'''
3470 '''used to test argument passing over the wire'''
3469 return b"%s %s %s %s %s" % (
3471 return b"%s %s %s %s %s" % (
3470 one,
3472 one,
3471 two,
3473 two,
3472 pycompat.bytestr(three),
3474 pycompat.bytestr(three),
3473 pycompat.bytestr(four),
3475 pycompat.bytestr(four),
3474 pycompat.bytestr(five),
3476 pycompat.bytestr(five),
3475 )
3477 )
3476
3478
3477 def savecommitmessage(self, text):
3479 def savecommitmessage(self, text):
3478 fp = self.vfs(b'last-message.txt', b'wb')
3480 fp = self.vfs(b'last-message.txt', b'wb')
3479 try:
3481 try:
3480 fp.write(text)
3482 fp.write(text)
3481 finally:
3483 finally:
3482 fp.close()
3484 fp.close()
3483 return self.pathto(fp.name[len(self.root) + 1 :])
3485 return self.pathto(fp.name[len(self.root) + 1 :])
3484
3486
3485
3487
3486 # used to avoid circular references so destructors work
3488 # used to avoid circular references so destructors work
3487 def aftertrans(files):
3489 def aftertrans(files):
3488 renamefiles = [tuple(t) for t in files]
3490 renamefiles = [tuple(t) for t in files]
3489
3491
3490 def a():
3492 def a():
3491 for vfs, src, dest in renamefiles:
3493 for vfs, src, dest in renamefiles:
3492 # if src and dest refer to the same file, vfs.rename is a no-op,
3494 # if src and dest refer to the same file, vfs.rename is a no-op,
3493 # leaving both src and dest on disk. delete dest to make sure
3495 # leaving both src and dest on disk. delete dest to make sure
3494 # the rename couldn't be such a no-op.
3496 # the rename couldn't be such a no-op.
3495 vfs.tryunlink(dest)
3497 vfs.tryunlink(dest)
3496 try:
3498 try:
3497 vfs.rename(src, dest)
3499 vfs.rename(src, dest)
3498 except OSError: # journal file does not yet exist
3500 except OSError: # journal file does not yet exist
3499 pass
3501 pass
3500
3502
3501 return a
3503 return a
3502
3504
3503
3505
3504 def undoname(fn):
3506 def undoname(fn):
3505 base, name = os.path.split(fn)
3507 base, name = os.path.split(fn)
3506 assert name.startswith(b'journal')
3508 assert name.startswith(b'journal')
3507 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3509 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3508
3510
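# (added example) undoname(b'journal.dirstate') -> b'undo.dirstate';
# only the first b'journal' component of the basename is replaced.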
3509
3511
3510 def instance(ui, path, create, intents=None, createopts=None):
3512 def instance(ui, path, create, intents=None, createopts=None):
3511 localpath = util.urllocalpath(path)
3513 localpath = util.urllocalpath(path)
3512 if create:
3514 if create:
3513 createrepository(ui, localpath, createopts=createopts)
3515 createrepository(ui, localpath, createopts=createopts)
3514
3516
3515 return makelocalrepository(ui, localpath, intents=intents)
3517 return makelocalrepository(ui, localpath, intents=intents)
3516
3518
3517
3519
3518 def islocal(path):
3520 def islocal(path):
3519 return True
3521 return True
3520
3522
3521
3523
3522 def defaultcreateopts(ui, createopts=None):
3524 def defaultcreateopts(ui, createopts=None):
3523 """Populate the default creation options for a repository.
3525 """Populate the default creation options for a repository.
3524
3526
3525 A dictionary of explicitly requested creation options can be passed
3527 A dictionary of explicitly requested creation options can be passed
3526 in. Missing keys will be populated.
3528 in. Missing keys will be populated.
3527 """
3529 """
3528 createopts = dict(createopts or {})
3530 createopts = dict(createopts or {})
3529
3531
3530 if b'backend' not in createopts:
3532 if b'backend' not in createopts:
3531 # experimental config: storage.new-repo-backend
3533 # experimental config: storage.new-repo-backend
3532 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3534 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3533
3535
3534 return createopts
3536 return createopts
3535
3537
3536
3538
3537 def newreporequirements(ui, createopts):
3539 def newreporequirements(ui, createopts):
3538 """Determine the set of requirements for a new local repository.
3540 """Determine the set of requirements for a new local repository.
3539
3541
3540 Extensions can wrap this function to specify custom requirements for
3542 Extensions can wrap this function to specify custom requirements for
3541 new repositories.
3543 new repositories.
3542 """
3544 """
3543 # If the repo is being created from a shared repository, we copy
3545 # If the repo is being created from a shared repository, we copy
3544 # its requirements.
3546 # its requirements.
3545 if b'sharedrepo' in createopts:
3547 if b'sharedrepo' in createopts:
3546 requirements = set(createopts[b'sharedrepo'].requirements)
3548 requirements = set(createopts[b'sharedrepo'].requirements)
3547 if createopts.get(b'sharedrelative'):
3549 if createopts.get(b'sharedrelative'):
3548 requirements.add(b'relshared')
3550 requirements.add(b'relshared')
3549 else:
3551 else:
3550 requirements.add(b'shared')
3552 requirements.add(b'shared')
3551
3553
3552 return requirements
3554 return requirements
3553
3555
3554 if b'backend' not in createopts:
3556 if b'backend' not in createopts:
3555 raise error.ProgrammingError(
3557 raise error.ProgrammingError(
3556 b'backend key not present in createopts; '
3558 b'backend key not present in createopts; '
3557 b'was defaultcreateopts() called?'
3559 b'was defaultcreateopts() called?'
3558 )
3560 )
3559
3561
3560 if createopts[b'backend'] != b'revlogv1':
3562 if createopts[b'backend'] != b'revlogv1':
3561 raise error.Abort(
3563 raise error.Abort(
3562 _(
3564 _(
3563 b'unable to determine repository requirements for '
3565 b'unable to determine repository requirements for '
3564 b'storage backend: %s'
3566 b'storage backend: %s'
3565 )
3567 )
3566 % createopts[b'backend']
3568 % createopts[b'backend']
3567 )
3569 )
3568
3570
3569 requirements = {b'revlogv1'}
3571 requirements = {b'revlogv1'}
3570 if ui.configbool(b'format', b'usestore'):
3572 if ui.configbool(b'format', b'usestore'):
3571 requirements.add(b'store')
3573 requirements.add(b'store')
3572 if ui.configbool(b'format', b'usefncache'):
3574 if ui.configbool(b'format', b'usefncache'):
3573 requirements.add(b'fncache')
3575 requirements.add(b'fncache')
3574 if ui.configbool(b'format', b'dotencode'):
3576 if ui.configbool(b'format', b'dotencode'):
3575 requirements.add(b'dotencode')
3577 requirements.add(b'dotencode')
3576
3578
3577 compengine = ui.config(b'format', b'revlog-compression')
3579 compengine = ui.config(b'format', b'revlog-compression')
3578 if compengine not in util.compengines:
3580 if compengine not in util.compengines:
3579 raise error.Abort(
3581 raise error.Abort(
3580 _(
3582 _(
3581 b'compression engine %s defined by '
3583 b'compression engine %s defined by '
3582 b'format.revlog-compression not available'
3584 b'format.revlog-compression not available'
3583 )
3585 )
3584 % compengine,
3586 % compengine,
3585 hint=_(
3587 hint=_(
3586 b'run "hg debuginstall" to list available '
3588 b'run "hg debuginstall" to list available '
3587 b'compression engines'
3589 b'compression engines'
3588 ),
3590 ),
3589 )
3591 )
3590
3592
3591 # zlib is the historical default and doesn't need an explicit requirement.
3593 # zlib is the historical default and doesn't need an explicit requirement.
3592 elif compengine == b'zstd':
3594 elif compengine == b'zstd':
3593 requirements.add(b'revlog-compression-zstd')
3595 requirements.add(b'revlog-compression-zstd')
3594 elif compengine != b'zlib':
3596 elif compengine != b'zlib':
3595 requirements.add(b'exp-compression-%s' % compengine)
3597 requirements.add(b'exp-compression-%s' % compengine)
3596
3598
3597 if scmutil.gdinitconfig(ui):
3599 if scmutil.gdinitconfig(ui):
3598 requirements.add(b'generaldelta')
3600 requirements.add(b'generaldelta')
3599 if ui.configbool(b'format', b'sparse-revlog'):
3601 if ui.configbool(b'format', b'sparse-revlog'):
3600 requirements.add(SPARSEREVLOG_REQUIREMENT)
3602 requirements.add(SPARSEREVLOG_REQUIREMENT)
3601
3603
3602 # experimental config: format.exp-use-side-data
3604 # experimental config: format.exp-use-side-data
3603 if ui.configbool(b'format', b'exp-use-side-data'):
3605 if ui.configbool(b'format', b'exp-use-side-data'):
3604 requirements.add(SIDEDATA_REQUIREMENT)
3606 requirements.add(SIDEDATA_REQUIREMENT)
3605 # experimental config: format.exp-use-copies-side-data-changeset
3607 # experimental config: format.exp-use-copies-side-data-changeset
3606 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3608 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3607 requirements.add(SIDEDATA_REQUIREMENT)
3609 requirements.add(SIDEDATA_REQUIREMENT)
3608 requirements.add(COPIESSDC_REQUIREMENT)
3610 requirements.add(COPIESSDC_REQUIREMENT)
3609 if ui.configbool(b'experimental', b'treemanifest'):
3611 if ui.configbool(b'experimental', b'treemanifest'):
3610 requirements.add(b'treemanifest')
3612 requirements.add(b'treemanifest')
3611
3613
3612 revlogv2 = ui.config(b'experimental', b'revlogv2')
3614 revlogv2 = ui.config(b'experimental', b'revlogv2')
3613 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3615 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3614 requirements.remove(b'revlogv1')
3616 requirements.remove(b'revlogv1')
3615 # generaldelta is implied by revlogv2.
3617 # generaldelta is implied by revlogv2.
3616 requirements.discard(b'generaldelta')
3618 requirements.discard(b'generaldelta')
3617 requirements.add(REVLOGV2_REQUIREMENT)
3619 requirements.add(REVLOGV2_REQUIREMENT)
3618 # experimental config: format.internal-phase
3620 # experimental config: format.internal-phase
3619 if ui.configbool(b'format', b'internal-phase'):
3621 if ui.configbool(b'format', b'internal-phase'):
3620 requirements.add(b'internal-phase')
3622 requirements.add(b'internal-phase')
3621
3623
3622 if createopts.get(b'narrowfiles'):
3624 if createopts.get(b'narrowfiles'):
3623 requirements.add(repository.NARROW_REQUIREMENT)
3625 requirements.add(repository.NARROW_REQUIREMENT)
3624
3626
3625 if createopts.get(b'lfs'):
3627 if createopts.get(b'lfs'):
3626 requirements.add(b'lfs')
3628 requirements.add(b'lfs')
3627
3629
3628 if ui.configbool(b'format', b'bookmarks-in-store'):
3630 if ui.configbool(b'format', b'bookmarks-in-store'):
3629 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3631 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3630
3632
3631 return requirements
3633 return requirements
3632
3634
3633
3635
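# Illustrative note (an assumption about stock defaults, not asserted by
# this function): with an untouched config the returned set typically
# looks like
#     {b'revlogv1', b'store', b'fncache', b'dotencode', b'generaldelta',
#      b'sparserevlog'}
# but every member above is driven by the ui options queried in the body.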
3634 def filterknowncreateopts(ui, createopts):
3636 def filterknowncreateopts(ui, createopts):
3635 """Filters a dict of repo creation options against options that are known.
3637 """Filters a dict of repo creation options against options that are known.
3636
3638
3637 Receives a dict of repo creation options and returns a dict of those
3639 Receives a dict of repo creation options and returns a dict of those
3638 options that we don't know how to handle.
3640 options that we don't know how to handle.
3639
3641
3640 This function is called as part of repository creation. If the
3642 This function is called as part of repository creation. If the
3641 returned dict contains any items, repository creation will not
3643 returned dict contains any items, repository creation will not
3642 be allowed, as it means there was a request to create a repository
3644 be allowed, as it means there was a request to create a repository
3643 with options not recognized by loaded code.
3645 with options not recognized by loaded code.
3644
3646
3645 Extensions can wrap this function to filter out creation options
3647 Extensions can wrap this function to filter out creation options
3646 they know how to handle.
3648 they know how to handle.
3647 """
3649 """
3648 known = {
3650 known = {
3649 b'backend',
3651 b'backend',
3650 b'lfs',
3652 b'lfs',
3651 b'narrowfiles',
3653 b'narrowfiles',
3652 b'sharedrepo',
3654 b'sharedrepo',
3653 b'sharedrelative',
3655 b'sharedrelative',
3654 b'shareditems',
3656 b'shareditems',
3655 b'shallowfilestore',
3657 b'shallowfilestore',
3656 }
3658 }
3657
3659
3658 return {k: v for k, v in createopts.items() if k not in known}
3660 return {k: v for k, v in createopts.items() if k not in known}
3659
3661
3660
3662
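# Illustrative sketch (hypothetical extension code): consuming a custom
# creation option so repository creation is allowed to proceed. The
# option name is made up for the example.
#
#     from mercurial import extensions, localrepo
#
#     def _filterknown(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         unknown.pop(b'myext-backend-tweak', None)  # we handle this one
#         return unknown
#
#     extensions.wrapfunction(localrepo, 'filterknowncreateopts', _filterknown)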
3661 def createrepository(ui, path, createopts=None):
3663 def createrepository(ui, path, createopts=None):
3662 """Create a new repository in a vfs.
3664 """Create a new repository in a vfs.
3663
3665
3664 ``path`` path to the new repo's working directory.
3666 ``path`` path to the new repo's working directory.
3665 ``createopts`` options for the new repository.
3667 ``createopts`` options for the new repository.
3666
3668
3667 The following keys for ``createopts`` are recognized:
3669 The following keys for ``createopts`` are recognized:
3668
3670
3669 backend
3671 backend
3670 The storage backend to use.
3672 The storage backend to use.
3671 lfs
3673 lfs
3672 Repository will be created with ``lfs`` requirement. The lfs extension
3674 Repository will be created with ``lfs`` requirement. The lfs extension
3673 will automatically be loaded when the repository is accessed.
3675 will automatically be loaded when the repository is accessed.
3674 narrowfiles
3676 narrowfiles
3675 Set up repository to support narrow file storage.
3677 Set up repository to support narrow file storage.
3676 sharedrepo
3678 sharedrepo
3677 Repository object from which storage should be shared.
3679 Repository object from which storage should be shared.
3678 sharedrelative
3680 sharedrelative
3679 Boolean indicating if the path to the shared repo should be
3681 Boolean indicating if the path to the shared repo should be
3680 stored as relative. By default, the pointer to the "parent" repo
3682 stored as relative. By default, the pointer to the "parent" repo
3681 is stored as an absolute path.
3683 is stored as an absolute path.
3682 shareditems
3684 shareditems
3683 Set of items to share to the new repository (in addition to storage).
3685 Set of items to share to the new repository (in addition to storage).
3684 shallowfilestore
3686 shallowfilestore
3685 Indicates that storage for files should be shallow (not all ancestor
3687 Indicates that storage for files should be shallow (not all ancestor
3686 revisions are known).
3688 revisions are known).
3687 """
3689 """
3688 createopts = defaultcreateopts(ui, createopts=createopts)
3690 createopts = defaultcreateopts(ui, createopts=createopts)
3689
3691
3690 unknownopts = filterknowncreateopts(ui, createopts)
3692 unknownopts = filterknowncreateopts(ui, createopts)
3691
3693
3692 if not isinstance(unknownopts, dict):
3694 if not isinstance(unknownopts, dict):
3693 raise error.ProgrammingError(
3695 raise error.ProgrammingError(
3694 b'filterknowncreateopts() did not return a dict'
3696 b'filterknowncreateopts() did not return a dict'
3695 )
3697 )
3696
3698
3697 if unknownopts:
3699 if unknownopts:
3698 raise error.Abort(
3700 raise error.Abort(
3699 _(
3701 _(
3700 b'unable to create repository because of unknown '
3702 b'unable to create repository because of unknown '
3701 b'creation option: %s'
3703 b'creation option: %s'
3702 )
3704 )
3703 % b', '.join(sorted(unknownopts)),
3705 % b', '.join(sorted(unknownopts)),
3704 hint=_(b'is a required extension not loaded?'),
3706 hint=_(b'is a required extension not loaded?'),
3705 )
3707 )
3706
3708
3707 requirements = newreporequirements(ui, createopts=createopts)
3709 requirements = newreporequirements(ui, createopts=createopts)
3708
3710
3709 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3711 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3710
3712
3711 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3713 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3712 if hgvfs.exists():
3714 if hgvfs.exists():
3713 raise error.RepoError(_(b'repository %s already exists') % path)
3715 raise error.RepoError(_(b'repository %s already exists') % path)
3714
3716
3715 if b'sharedrepo' in createopts:
3717 if b'sharedrepo' in createopts:
3716 sharedpath = createopts[b'sharedrepo'].sharedpath
3718 sharedpath = createopts[b'sharedrepo'].sharedpath
3717
3719
3718 if createopts.get(b'sharedrelative'):
3720 if createopts.get(b'sharedrelative'):
3719 try:
3721 try:
3720 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3722 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3721 except (IOError, ValueError) as e:
3723 except (IOError, ValueError) as e:
3722 # ValueError is raised on Windows if the drive letters differ
3724 # ValueError is raised on Windows if the drive letters differ
3723 # on each path.
3725 # on each path.
3724 raise error.Abort(
3726 raise error.Abort(
3725 _(b'cannot calculate relative path'),
3727 _(b'cannot calculate relative path'),
3726 hint=stringutil.forcebytestr(e),
3728 hint=stringutil.forcebytestr(e),
3727 )
3729 )
3728
3730
3729 if not wdirvfs.exists():
3731 if not wdirvfs.exists():
3730 wdirvfs.makedirs()
3732 wdirvfs.makedirs()
3731
3733
3732 hgvfs.makedir(notindexed=True)
3734 hgvfs.makedir(notindexed=True)
3733 if b'sharedrepo' not in createopts:
3735 if b'sharedrepo' not in createopts:
3734 hgvfs.mkdir(b'cache')
3736 hgvfs.mkdir(b'cache')
3735 hgvfs.mkdir(b'wcache')
3737 hgvfs.mkdir(b'wcache')
3736
3738
3737 if b'store' in requirements and b'sharedrepo' not in createopts:
3739 if b'store' in requirements and b'sharedrepo' not in createopts:
3738 hgvfs.mkdir(b'store')
3740 hgvfs.mkdir(b'store')
3739
3741
3740 # We create an invalid changelog outside the store so very old
3742 # We create an invalid changelog outside the store so very old
3741 # Mercurial versions (which didn't know about the requirements
3743 # Mercurial versions (which didn't know about the requirements
3742 # file) encounter an error on reading the changelog. This
3744 # file) encounter an error on reading the changelog. This
3743 # effectively locks out old clients and prevents them from
3745 # effectively locks out old clients and prevents them from
3744 # mucking with a repo in an unknown format.
3746 # mucking with a repo in an unknown format.
3745 #
3747 #
3746 # The revlog header has version 2, which won't be recognized by
3748 # The revlog header has version 2, which won't be recognized by
3747 # such old clients.
3749 # such old clients.
3748 hgvfs.append(
3750 hgvfs.append(
3749 b'00changelog.i',
3751 b'00changelog.i',
3750 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3752 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3751 b'layout',
3753 b'layout',
3752 )
3754 )
3753
3755
3754 scmutil.writerequires(hgvfs, requirements)
3756 scmutil.writerequires(hgvfs, requirements)
3755
3757
3756 # Write out file telling readers where to find the shared store.
3758 # Write out file telling readers where to find the shared store.
3757 if b'sharedrepo' in createopts:
3759 if b'sharedrepo' in createopts:
3758 hgvfs.write(b'sharedpath', sharedpath)
3760 hgvfs.write(b'sharedpath', sharedpath)
3759
3761
3760 if createopts.get(b'shareditems'):
3762 if createopts.get(b'shareditems'):
3761 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3763 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3762 hgvfs.write(b'shared', shared)
3764 hgvfs.write(b'shared', shared)
3763
3765
3764
3766
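# Illustrative usage sketch (the path is made up): creating an lfs-enabled
# repository programmatically.
#
#     from mercurial import localrepo, ui as uimod
#
#     ui = uimod.ui.load()
#     localrepo.createrepository(ui, b'/tmp/newrepo',
#                                createopts={b'lfs': True})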
3765 def poisonrepository(repo):
3767 def poisonrepository(repo):
3766 """Poison a repository instance so it can no longer be used."""
3768 """Poison a repository instance so it can no longer be used."""
3767 # Perform any cleanup on the instance.
3769 # Perform any cleanup on the instance.
3768 repo.close()
3770 repo.close()
3769
3771
3770 # Our strategy is to replace the type of the object with one that
3772 # Our strategy is to replace the type of the object with one that
3771 # has all attribute lookups result in error.
3773 # has all attribute lookups result in error.
3772 #
3774 #
3773 # But we have to allow the close() method because some constructors
3775 # But we have to allow the close() method because some constructors
3774 # of repos call close() on repo references.
3776 # of repos call close() on repo references.
3775 class poisonedrepository(object):
3777 class poisonedrepository(object):
3776 def __getattribute__(self, item):
3778 def __getattribute__(self, item):
3777 if item == 'close':
3779 if item == 'close':
3778 return object.__getattribute__(self, item)
3780 return object.__getattribute__(self, item)
3779
3781
3780 raise error.ProgrammingError(
3782 raise error.ProgrammingError(
3781 b'repo instances should not be used after unshare'
3783 b'repo instances should not be used after unshare'
3782 )
3784 )
3783
3785
3784 def close(self):
3786 def close(self):
3785 pass
3787 pass
3786
3788
3787 # We may have a repoview, which intercepts __setattr__. So be sure
3789 # We may have a repoview, which intercepts __setattr__. So be sure
3788 # we operate at the lowest level possible.
3790 # we operate at the lowest level possible.
3789 object.__setattr__(repo, '__class__', poisonedrepository)
3791 object.__setattr__(repo, '__class__', poisonedrepository)
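# Illustrative note (based on the error message above): unshare is the
# expected caller, so stale references fail loudly afterwards, e.g.
#
#     poisonrepository(repo)
#     repo.changelog  # raises error.ProgrammingError
#     repo.close()    # still allowed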
@@ -1,236 +1,251 b''
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11 import zlib
11 import zlib
12
12
13 from ..node import nullid, nullrev
13 from ..node import nullid, nullrev
14 from .. import (
14 from .. import (
15 pycompat,
15 pycompat,
16 util,
16 util,
17 )
17 )
18
18
19 from ..revlogutils import nodemap as nodemaputil
19 from ..revlogutils import nodemap as nodemaputil
20
20
21 stringio = pycompat.bytesio
21 stringio = pycompat.bytesio
22
22
23
23
24 _pack = struct.pack
24 _pack = struct.pack
25 _unpack = struct.unpack
25 _unpack = struct.unpack
26 _compress = zlib.compress
26 _compress = zlib.compress
27 _decompress = zlib.decompress
27 _decompress = zlib.decompress
28
28
29 # Some code below makes tuples directly because it's more convenient. However,
29 # Some code below makes tuples directly because it's more convenient. However,
30 # code outside this module should always use dirstatetuple.
30 # code outside this module should always use dirstatetuple.
31 def dirstatetuple(*x):
31 def dirstatetuple(*x):
32 # x is a tuple
32 # x is a tuple
33 return x
33 return x
34
34
35
35
36 indexformatng = b">Qiiiiii20s12x"
36 indexformatng = b">Qiiiiii20s12x"
37 indexfirst = struct.calcsize(b'Q')
37 indexfirst = struct.calcsize(b'Q')
38 sizeint = struct.calcsize(b'i')
38 sizeint = struct.calcsize(b'i')
39 indexsize = struct.calcsize(indexformatng)
39 indexsize = struct.calcsize(indexformatng)
40
40
41
41
42 def gettype(q):
42 def gettype(q):
43 return int(q & 0xFFFF)
43 return int(q & 0xFFFF)
44
44
45
45
46 def offset_type(offset, type):
46 def offset_type(offset, type):
47 return int(int(offset) << 16 | type)
47 return int(int(offset) << 16 | type)
48
48
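# (added note) the first field of an index entry packs the byte offset
# into the high bits and a 16-bit flag field into the low bits;
# gettype() and offset_type() above are the two halves of that encoding,
# e.g. offset_type(0x10, 0) == 0x100000 and gettype(0x100000) == 0.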
49
49
50 class BaseIndexObject(object):
50 class BaseIndexObject(object):
51 @property
51 @property
52 def nodemap(self):
52 def nodemap(self):
53 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
53 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
54 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
54 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
55 return self._nodemap
55 return self._nodemap
56
56
57 @util.propertycache
57 @util.propertycache
58 def _nodemap(self):
58 def _nodemap(self):
59 nodemap = nodemaputil.NodeMap({nullid: nullrev})
59 nodemap = nodemaputil.NodeMap({nullid: nullrev})
60 for r in range(0, len(self)):
60 for r in range(0, len(self)):
61 n = self[r][7]
61 n = self[r][7]
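# (added note) index entries are 8-tuples; slot 7 is the 20-byte node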
62 nodemap[n] = r
62 nodemap[n] = r
63 return nodemap
63 return nodemap
64
64
65 def has_node(self, node):
65 def has_node(self, node):
66 """return True if the node exist in the index"""
66 """return True if the node exist in the index"""
67 return node in self._nodemap
67 return node in self._nodemap
68
68
69 def rev(self, node):
69 def rev(self, node):
70 """return a revision for a node
70 """return a revision for a node
71
71
72 If the node is unknown, raise a RevlogError"""
72 If the node is unknown, raise a RevlogError"""
73 return self._nodemap[node]
73 return self._nodemap[node]
74
74
75 def get_rev(self, node):
75 def get_rev(self, node):
76 """return a revision for a node
76 """return a revision for a node
77
77
78 If the node is unknown, return None"""
78 If the node is unknown, return None"""
79 return self._nodemap.get(node)
79 return self._nodemap.get(node)
80
80
81 def _stripnodes(self, start):
81 def _stripnodes(self, start):
82 if '_nodemap' in vars(self):
82 if '_nodemap' in vars(self):
83 for r in range(start, len(self)):
83 for r in range(start, len(self)):
84 n = self[r][7]
84 n = self[r][7]
85 del self._nodemap[n]
85 del self._nodemap[n]
86
86
87 def clearcaches(self):
87 def clearcaches(self):
88 self.__dict__.pop('_nodemap', None)
88 self.__dict__.pop('_nodemap', None)
89
89
90 def __len__(self):
90 def __len__(self):
91 return self._lgt + len(self._extra)
91 return self._lgt + len(self._extra)
92
92
93 def append(self, tup):
93 def append(self, tup):
94 if '_nodemap' in vars(self):
94 if '_nodemap' in vars(self):
95 self._nodemap[tup[7]] = len(self)
95 self._nodemap[tup[7]] = len(self)
96 self._extra.append(tup)
96 self._extra.append(tup)
97
97
98 def _check_index(self, i):
98 def _check_index(self, i):
99 if not isinstance(i, int):
99 if not isinstance(i, int):
100 raise TypeError(b"expecting int indexes")
100 raise TypeError(b"expecting int indexes")
101 if i < 0 or i >= len(self):
101 if i < 0 or i >= len(self):
102 raise IndexError
102 raise IndexError
103
103
104 def __getitem__(self, i):
104 def __getitem__(self, i):
105 if i == -1:
105 if i == -1:
106 return (0, 0, 0, -1, -1, -1, -1, nullid)
106 return (0, 0, 0, -1, -1, -1, -1, nullid)
107 self._check_index(i)
107 self._check_index(i)
108 if i >= self._lgt:
108 if i >= self._lgt:
109 return self._extra[i - self._lgt]
109 return self._extra[i - self._lgt]
110 index = self._calculate_index(i)
110 index = self._calculate_index(i)
111 r = struct.unpack(indexformatng, self._data[index : index + indexsize])
111 r = struct.unpack(indexformatng, self._data[index : index + indexsize])
112 if i == 0:
112 if i == 0:
113 e = list(r)
113 e = list(r)
114 type = gettype(e[0])
114 type = gettype(e[0])
115 e[0] = offset_type(0, type)
115 e[0] = offset_type(0, type)
116 return tuple(e)
116 return tuple(e)
117 return r
117 return r
118
118
119
119
120 class IndexObject(BaseIndexObject):
120 class IndexObject(BaseIndexObject):
121 def __init__(self, data):
121 def __init__(self, data):
122 assert len(data) % indexsize == 0
122 assert len(data) % indexsize == 0
123 self._data = data
123 self._data = data
124 self._lgt = len(data) // indexsize
124 self._lgt = len(data) // indexsize
125 self._extra = []
125 self._extra = []
126
126
127 def _calculate_index(self, i):
127 def _calculate_index(self, i):
128 return i * indexsize
128 return i * indexsize
129
129
130 def __delitem__(self, i):
130 def __delitem__(self, i):
131 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
131 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
132 raise ValueError(b"deleting slices only supports a:-1 with step 1")
132 raise ValueError(b"deleting slices only supports a:-1 with step 1")
133 i = i.start
133 i = i.start
134 self._check_index(i)
134 self._check_index(i)
135 self._stripnodes(i)
135 self._stripnodes(i)
136 if i < self._lgt:
136 if i < self._lgt:
137 self._data = self._data[: i * indexsize]
137 self._data = self._data[: i * indexsize]
138 self._lgt = i
138 self._lgt = i
139 self._extra = []
139 self._extra = []
140 else:
140 else:
141 self._extra = self._extra[: i - self._lgt]
141 self._extra = self._extra[: i - self._lgt]
142
142
143
143
144 class PersistentNodeMapIndexObject(IndexObject):
145 """a Debug oriented class to test persistent nodemap
146
147 We need a simple python object to test API and higher level behavior. See
148 the Rust implementation for more serious usage. This should be used only
149 through the dedicated `devel.persistent-nodemap` config.
150 """
151
152
144 class InlinedIndexObject(BaseIndexObject):
153 class InlinedIndexObject(BaseIndexObject):
145 def __init__(self, data, inline=0):
154 def __init__(self, data, inline=0):
146 self._data = data
155 self._data = data
147 self._lgt = self._inline_scan(None)
156 self._lgt = self._inline_scan(None)
148 self._inline_scan(self._lgt)
157 self._inline_scan(self._lgt)
149 self._extra = []
158 self._extra = []
150
159
151 def _inline_scan(self, lgt):
160 def _inline_scan(self, lgt):
152 off = 0
161 off = 0
153 if lgt is not None:
162 if lgt is not None:
154 self._offsets = [0] * lgt
163 self._offsets = [0] * lgt
155 count = 0
164 count = 0
156 while off <= len(self._data) - indexsize:
165 while off <= len(self._data) - indexsize:
157 (s,) = struct.unpack(
166 (s,) = struct.unpack(
158 b'>i', self._data[off + indexfirst : off + sizeint + indexfirst]
167 b'>i', self._data[off + indexfirst : off + sizeint + indexfirst]
159 )
168 )
160 if lgt is not None:
169 if lgt is not None:
161 self._offsets[count] = off
170 self._offsets[count] = off
162 count += 1
171 count += 1
163 off += indexsize + s
172 off += indexsize + s
164 if off != len(self._data):
173 if off != len(self._data):
165 raise ValueError(b"corrupted data")
174 raise ValueError(b"corrupted data")
166 return count
175 return count
167
176
168 def __delitem__(self, i):
177 def __delitem__(self, i):
169 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
178 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
170 raise ValueError(b"deleting slices only supports a:-1 with step 1")
179 raise ValueError(b"deleting slices only supports a:-1 with step 1")
171 i = i.start
180 i = i.start
172 self._check_index(i)
181 self._check_index(i)
173 self._stripnodes(i)
182 self._stripnodes(i)
174 if i < self._lgt:
183 if i < self._lgt:
175 self._offsets = self._offsets[:i]
184 self._offsets = self._offsets[:i]
176 self._lgt = i
185 self._lgt = i
177 self._extra = []
186 self._extra = []
178 else:
187 else:
179 self._extra = self._extra[: i - self._lgt]
188 self._extra = self._extra[: i - self._lgt]
180
189
181 def _calculate_index(self, i):
190 def _calculate_index(self, i):
182 return self._offsets[i]
191 return self._offsets[i]
183
192
184
193
185 def parse_index2(data, inline):
194 def parse_index2(data, inline):
186 if not inline:
195 if not inline:
187 return IndexObject(data), None
196 return IndexObject(data), None
188 return InlinedIndexObject(data, inline), (0, data)
197 return InlinedIndexObject(data, inline), (0, data)
189
198
190
199
200 def parse_index_devel_nodemap(data, inline):
201 """like parse_index2, but alway return a PersistentNodeMapIndexObject
202 """
203 return PersistentNodeMapIndexObject(data), None
204
205
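# Hedged usage sketch: when the dedicated devel config is active (see the
# class docstring above), revlog IO calls this entry point instead of
# parse_index2; the debug path never produces an inline chunk cache.
def _demo_parse_devel_nodemap(data):
    index, cache = parse_index_devel_nodemap(data, False)
    assert cache is None
    return index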
191 def parse_dirstate(dmap, copymap, st):
206 def parse_dirstate(dmap, copymap, st):
192 parents = [st[:20], st[20:40]]
207 parents = [st[:20], st[20:40]]
193 # dereference fields so they will be local in loop
208 # dereference fields so they will be local in loop
194 format = b">cllll"
209 format = b">cllll"
195 e_size = struct.calcsize(format)
210 e_size = struct.calcsize(format)
196 pos1 = 40
211 pos1 = 40
197 l = len(st)
212 l = len(st)
198
213
199 # the inner loop
214 # the inner loop
200 while pos1 < l:
215 while pos1 < l:
201 pos2 = pos1 + e_size
216 pos2 = pos1 + e_size
202 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
217 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
203 pos1 = pos2 + e[4]
218 pos1 = pos2 + e[4]
204 f = st[pos2:pos1]
219 f = st[pos2:pos1]
205 if b'\0' in f:
220 if b'\0' in f:
206 f, c = f.split(b'\0')
221 f, c = f.split(b'\0')
207 copymap[f] = c
222 copymap[f] = c
208 dmap[f] = e[:4]
223 dmap[f] = e[:4]
209 return parents
224 return parents
210
225
211
226
212 def pack_dirstate(dmap, copymap, pl, now):
227 def pack_dirstate(dmap, copymap, pl, now):
213 now = int(now)
228 now = int(now)
214 cs = stringio()
229 cs = stringio()
215 write = cs.write
230 write = cs.write
216 write(b"".join(pl))
231 write(b"".join(pl))
217 for f, e in pycompat.iteritems(dmap):
232 for f, e in pycompat.iteritems(dmap):
218 if e[0] == b'n' and e[3] == now:
233 if e[0] == b'n' and e[3] == now:
219 # The file was last modified "simultaneously" with the current
234 # The file was last modified "simultaneously" with the current
220 # write to dirstate (i.e. within the same second for file-
235 # write to dirstate (i.e. within the same second for file-
221 # systems with a granularity of 1 sec). This commonly happens
236 # systems with a granularity of 1 sec). This commonly happens
222 # for at least a couple of files on 'update'.
237 # for at least a couple of files on 'update'.
223 # The user could change the file without changing its size
238 # The user could change the file without changing its size
224 # within the same second. Invalidate the file's mtime in
239 # within the same second. Invalidate the file's mtime in
225 # dirstate, forcing future 'status' calls to compare the
240 # dirstate, forcing future 'status' calls to compare the
226 # contents of the file if the size is the same. This prevents
241 # contents of the file if the size is the same. This prevents
227 # mistakenly treating such files as clean.
242 # mistakenly treating such files as clean.
228 e = dirstatetuple(e[0], e[1], e[2], -1)
243 e = dirstatetuple(e[0], e[1], e[2], -1)
229 dmap[f] = e
244 dmap[f] = e
230
245
231 if f in copymap:
246 if f in copymap:
232 f = b"%s\0%s" % (f, copymap[f])
247 f = b"%s\0%s" % (f, copymap[f])
233 e = _pack(b">cllll", e[0], e[1], e[2], e[3], len(f))
248 e = _pack(b">cllll", e[0], e[1], e[2], e[3], len(f))
234 write(e)
249 write(e)
235 write(f)
250 write(f)
236 return cs.getvalue()
251 return cs.getvalue()
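# Hedged, self-contained sketch of the dirstate wire format handled by
# parse_dirstate/pack_dirstate above: two 20-byte parents, then repeated
# records made of a ">cllll" header (state, mode, size, mtime, filename
# length) followed by the filename, with an optional "\0" + copy source
# appended. The sample values are invented for illustration.
def _demo_dirstate_roundtrip():
    parents = [b'\x11' * 20, b'\x22' * 20]
    f = b'dir/file.txt'
    raw = b''.join(parents)
    raw += struct.pack(b'>cllll', b'n', 0o644, 12, 0, len(f)) + f
    dmap, copymap = {}, {}
    assert parse_dirstate(dmap, copymap, raw) == parents
    assert dmap[f] == (b'n', 0o644, 12, 0) and not copymap
    return dmap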
@@ -1,2996 +1,3019 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import collections
16 import collections
17 import contextlib
17 import contextlib
18 import errno
18 import errno
19 import io
19 import io
20 import os
20 import os
21 import struct
21 import struct
22 import zlib
22 import zlib
23
23
24 # import stuff from node for others to import from revlog
24 # import stuff from node for others to import from revlog
25 from .node import (
25 from .node import (
26 bin,
26 bin,
27 hex,
27 hex,
28 nullhex,
28 nullhex,
29 nullid,
29 nullid,
30 nullrev,
30 nullrev,
31 short,
31 short,
32 wdirfilenodeids,
32 wdirfilenodeids,
33 wdirhex,
33 wdirhex,
34 wdirid,
34 wdirid,
35 wdirrev,
35 wdirrev,
36 )
36 )
37 from .i18n import _
37 from .i18n import _
38 from .pycompat import getattr
38 from .pycompat import getattr
39 from .revlogutils.constants import (
39 from .revlogutils.constants import (
40 FLAG_GENERALDELTA,
40 FLAG_GENERALDELTA,
41 FLAG_INLINE_DATA,
41 FLAG_INLINE_DATA,
42 REVLOGV0,
42 REVLOGV0,
43 REVLOGV1,
43 REVLOGV1,
44 REVLOGV1_FLAGS,
44 REVLOGV1_FLAGS,
45 REVLOGV2,
45 REVLOGV2,
46 REVLOGV2_FLAGS,
46 REVLOGV2_FLAGS,
47 REVLOG_DEFAULT_FLAGS,
47 REVLOG_DEFAULT_FLAGS,
48 REVLOG_DEFAULT_FORMAT,
48 REVLOG_DEFAULT_FORMAT,
49 REVLOG_DEFAULT_VERSION,
49 REVLOG_DEFAULT_VERSION,
50 )
50 )
51 from .revlogutils.flagutil import (
51 from .revlogutils.flagutil import (
52 REVIDX_DEFAULT_FLAGS,
52 REVIDX_DEFAULT_FLAGS,
53 REVIDX_ELLIPSIS,
53 REVIDX_ELLIPSIS,
54 REVIDX_EXTSTORED,
54 REVIDX_EXTSTORED,
55 REVIDX_FLAGS_ORDER,
55 REVIDX_FLAGS_ORDER,
56 REVIDX_ISCENSORED,
56 REVIDX_ISCENSORED,
57 REVIDX_RAWTEXT_CHANGING_FLAGS,
57 REVIDX_RAWTEXT_CHANGING_FLAGS,
58 REVIDX_SIDEDATA,
58 REVIDX_SIDEDATA,
59 )
59 )
60 from .thirdparty import attr
60 from .thirdparty import attr
61 from . import (
61 from . import (
62 ancestor,
62 ancestor,
63 dagop,
63 dagop,
64 error,
64 error,
65 mdiff,
65 mdiff,
66 policy,
66 policy,
67 pycompat,
67 pycompat,
68 templatefilters,
68 templatefilters,
69 util,
69 util,
70 )
70 )
71 from .interfaces import (
71 from .interfaces import (
72 repository,
72 repository,
73 util as interfaceutil,
73 util as interfaceutil,
74 )
74 )
75 from .revlogutils import (
75 from .revlogutils import (
76 deltas as deltautil,
76 deltas as deltautil,
77 flagutil,
77 flagutil,
78 nodemap as nodemaputil,
78 nodemap as nodemaputil,
79 sidedata as sidedatautil,
79 sidedata as sidedatautil,
80 )
80 )
81 from .utils import (
81 from .utils import (
82 storageutil,
82 storageutil,
83 stringutil,
83 stringutil,
84 )
84 )
85
85
86 # blanket usage of all the names to prevent pyflakes complaints
86 # blanket usage of all the names to prevent pyflakes complaints
87 # We need these names available in the module for extensions.
87 # We need these names available in the module for extensions.
88 REVLOGV0
88 REVLOGV0
89 REVLOGV1
89 REVLOGV1
90 REVLOGV2
90 REVLOGV2
91 FLAG_INLINE_DATA
91 FLAG_INLINE_DATA
92 FLAG_GENERALDELTA
92 FLAG_GENERALDELTA
93 REVLOG_DEFAULT_FLAGS
93 REVLOG_DEFAULT_FLAGS
94 REVLOG_DEFAULT_FORMAT
94 REVLOG_DEFAULT_FORMAT
95 REVLOG_DEFAULT_VERSION
95 REVLOG_DEFAULT_VERSION
96 REVLOGV1_FLAGS
96 REVLOGV1_FLAGS
97 REVLOGV2_FLAGS
97 REVLOGV2_FLAGS
98 REVIDX_ISCENSORED
98 REVIDX_ISCENSORED
99 REVIDX_ELLIPSIS
99 REVIDX_ELLIPSIS
100 REVIDX_SIDEDATA
100 REVIDX_SIDEDATA
101 REVIDX_EXTSTORED
101 REVIDX_EXTSTORED
102 REVIDX_DEFAULT_FLAGS
102 REVIDX_DEFAULT_FLAGS
103 REVIDX_FLAGS_ORDER
103 REVIDX_FLAGS_ORDER
104 REVIDX_RAWTEXT_CHANGING_FLAGS
104 REVIDX_RAWTEXT_CHANGING_FLAGS
105
105
106 parsers = policy.importmod('parsers')
106 parsers = policy.importmod('parsers')
107 rustancestor = policy.importrust('ancestor')
107 rustancestor = policy.importrust('ancestor')
108 rustdagop = policy.importrust('dagop')
108 rustdagop = policy.importrust('dagop')
109 rustrevlog = policy.importrust('revlog')
109 rustrevlog = policy.importrust('revlog')
110
110
111 # Aliased for performance.
111 # Aliased for performance.
112 _zlibdecompress = zlib.decompress
112 _zlibdecompress = zlib.decompress
113
113
114 # max size of revlog with inline data
114 # max size of revlog with inline data
115 _maxinline = 131072
115 _maxinline = 131072
116 _chunksize = 1048576
116 _chunksize = 1048576
117
117
118 # Flag processors for REVIDX_ELLIPSIS.
118 # Flag processors for REVIDX_ELLIPSIS.
119 def ellipsisreadprocessor(rl, text):
119 def ellipsisreadprocessor(rl, text):
120 return text, False, {}
120 return text, False, {}
121
121
122
122
123 def ellipsiswriteprocessor(rl, text, sidedata):
123 def ellipsiswriteprocessor(rl, text, sidedata):
124 return text, False
124 return text, False
125
125
126
126
127 def ellipsisrawprocessor(rl, text):
127 def ellipsisrawprocessor(rl, text):
128 return False
128 return False
129
129
130
130
131 ellipsisprocessor = (
131 ellipsisprocessor = (
132 ellipsisreadprocessor,
132 ellipsisreadprocessor,
133 ellipsiswriteprocessor,
133 ellipsiswriteprocessor,
134 ellipsisrawprocessor,
134 ellipsisrawprocessor,
135 )
135 )
136
136
137
137
138 def getoffset(q):
138 def getoffset(q):
139 return int(q >> 16)
139 return int(q >> 16)
140
140
141
141
142 def gettype(q):
142 def gettype(q):
143 return int(q & 0xFFFF)
143 return int(q & 0xFFFF)
144
144
145
145
146 def offset_type(offset, type):
146 def offset_type(offset, type):
147 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
147 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
148 raise ValueError(b'unknown revlog index flags')
148 raise ValueError(b'unknown revlog index flags')
149 return int(int(offset) << 16 | type)
149 return int(int(offset) << 16 | type)
150
150
151
151
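# Hedged illustration: the first field of a v1 index entry packs a 48-bit
# byte offset with 16 flag bits; the helpers above round-trip the encoding.
def _demo_offset_type_roundtrip():
    q = offset_type(4096, 0)  # flag value 0 is always acceptable
    assert getoffset(q) == 4096 and gettype(q) == 0
    return q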
152 def _verify_revision(rl, skipflags, state, node):
152 def _verify_revision(rl, skipflags, state, node):
153 """Verify the integrity of the given revlog ``node`` while providing a hook
153 """Verify the integrity of the given revlog ``node`` while providing a hook
154 point for extensions to influence the operation."""
154 point for extensions to influence the operation."""
155 if skipflags:
155 if skipflags:
156 state[b'skipread'].add(node)
156 state[b'skipread'].add(node)
157 else:
157 else:
158 # Side-effect: read content and verify hash.
158 # Side-effect: read content and verify hash.
159 rl.revision(node)
159 rl.revision(node)
160
160
161
161
162 @attr.s(slots=True, frozen=True)
162 @attr.s(slots=True, frozen=True)
163 class _revisioninfo(object):
163 class _revisioninfo(object):
164 """Information about a revision that allows building its fulltext
164 """Information about a revision that allows building its fulltext
165 node: expected hash of the revision
165 node: expected hash of the revision
166 p1, p2: parent revs of the revision
166 p1, p2: parent revs of the revision
167 btext: built text cache consisting of a one-element list
167 btext: built text cache consisting of a one-element list
168 cachedelta: (baserev, uncompressed_delta) or None
168 cachedelta: (baserev, uncompressed_delta) or None
169 flags: flags associated to the revision storage
169 flags: flags associated to the revision storage
170
170
171 One of btext[0] or cachedelta must be set.
171 One of btext[0] or cachedelta must be set.
172 """
172 """
173
173
174 node = attr.ib()
174 node = attr.ib()
175 p1 = attr.ib()
175 p1 = attr.ib()
176 p2 = attr.ib()
176 p2 = attr.ib()
177 btext = attr.ib()
177 btext = attr.ib()
178 textlen = attr.ib()
178 textlen = attr.ib()
179 cachedelta = attr.ib()
179 cachedelta = attr.ib()
180 flags = attr.ib()
180 flags = attr.ib()
181
181
182
182
183 @interfaceutil.implementer(repository.irevisiondelta)
183 @interfaceutil.implementer(repository.irevisiondelta)
184 @attr.s(slots=True)
184 @attr.s(slots=True)
185 class revlogrevisiondelta(object):
185 class revlogrevisiondelta(object):
186 node = attr.ib()
186 node = attr.ib()
187 p1node = attr.ib()
187 p1node = attr.ib()
188 p2node = attr.ib()
188 p2node = attr.ib()
189 basenode = attr.ib()
189 basenode = attr.ib()
190 flags = attr.ib()
190 flags = attr.ib()
191 baserevisionsize = attr.ib()
191 baserevisionsize = attr.ib()
192 revision = attr.ib()
192 revision = attr.ib()
193 delta = attr.ib()
193 delta = attr.ib()
194 linknode = attr.ib(default=None)
194 linknode = attr.ib(default=None)
195
195
196
196
197 @interfaceutil.implementer(repository.iverifyproblem)
197 @interfaceutil.implementer(repository.iverifyproblem)
198 @attr.s(frozen=True)
198 @attr.s(frozen=True)
199 class revlogproblem(object):
199 class revlogproblem(object):
200 warning = attr.ib(default=None)
200 warning = attr.ib(default=None)
201 error = attr.ib(default=None)
201 error = attr.ib(default=None)
202 node = attr.ib(default=None)
202 node = attr.ib(default=None)
203
203
204
204
205 # index v0:
205 # index v0:
206 # 4 bytes: offset
206 # 4 bytes: offset
207 # 4 bytes: compressed length
207 # 4 bytes: compressed length
208 # 4 bytes: base rev
208 # 4 bytes: base rev
209 # 4 bytes: link rev
209 # 4 bytes: link rev
210 # 20 bytes: parent 1 nodeid
210 # 20 bytes: parent 1 nodeid
211 # 20 bytes: parent 2 nodeid
211 # 20 bytes: parent 2 nodeid
212 # 20 bytes: nodeid
212 # 20 bytes: nodeid
213 indexformatv0 = struct.Struct(b">4l20s20s20s")
213 indexformatv0 = struct.Struct(b">4l20s20s20s")
214 indexformatv0_pack = indexformatv0.pack
214 indexformatv0_pack = indexformatv0.pack
215 indexformatv0_unpack = indexformatv0.unpack
215 indexformatv0_unpack = indexformatv0.unpack
216
216
217
217
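# Hedged illustration of the v0 record layout documented above: four
# big-endian 32-bit integers followed by three 20-byte node ids, 76 bytes
# in total. The all-zero values are placeholders.
def _demo_v0_entry():
    assert indexformatv0.size == 4 * 4 + 3 * 20
    raw = indexformatv0_pack(0, 0, 0, 0, b'\x00' * 20, b'\x00' * 20, b'\x00' * 20)
    return indexformatv0_unpack(raw)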
218 class revlogoldindex(list):
218 class revlogoldindex(list):
219 @property
219 @property
220 def nodemap(self):
220 def nodemap(self):
221 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
221 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
222 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
222 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
223 return self._nodemap
223 return self._nodemap
224
224
225 @util.propertycache
225 @util.propertycache
226 def _nodemap(self):
226 def _nodemap(self):
227 nodemap = nodemaputil.NodeMap({nullid: nullrev})
227 nodemap = nodemaputil.NodeMap({nullid: nullrev})
228 for r in range(0, len(self)):
228 for r in range(0, len(self)):
229 n = self[r][7]
229 n = self[r][7]
230 nodemap[n] = r
230 nodemap[n] = r
231 return nodemap
231 return nodemap
232
232
233 def has_node(self, node):
233 def has_node(self, node):
234 """return True if the node exist in the index"""
234 """return True if the node exist in the index"""
235 return node in self._nodemap
235 return node in self._nodemap
236
236
237 def rev(self, node):
237 def rev(self, node):
238 """return a revision for a node
238 """return a revision for a node
239
239
240 If the node is unknown, raise a RevlogError"""
240 If the node is unknown, raise a RevlogError"""
241 return self._nodemap[node]
241 return self._nodemap[node]
242
242
243 def get_rev(self, node):
243 def get_rev(self, node):
244 """return a revision for a node
244 """return a revision for a node
245
245
246 If the node is unknown, return None"""
246 If the node is unknown, return None"""
247 return self._nodemap.get(node)
247 return self._nodemap.get(node)
248
248
249 def append(self, tup):
249 def append(self, tup):
250 self._nodemap[tup[7]] = len(self)
250 self._nodemap[tup[7]] = len(self)
251 super(revlogoldindex, self).append(tup)
251 super(revlogoldindex, self).append(tup)
252
252
253 def __delitem__(self, i):
253 def __delitem__(self, i):
254 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
254 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
255 raise ValueError(b"deleting slices only supports a:-1 with step 1")
255 raise ValueError(b"deleting slices only supports a:-1 with step 1")
256 for r in pycompat.xrange(i.start, len(self)):
256 for r in pycompat.xrange(i.start, len(self)):
257 del self._nodemap[self[r][7]]
257 del self._nodemap[self[r][7]]
258 super(revlogoldindex, self).__delitem__(i)
258 super(revlogoldindex, self).__delitem__(i)
259
259
260 def clearcaches(self):
260 def clearcaches(self):
261 self.__dict__.pop('_nodemap', None)
261 self.__dict__.pop('_nodemap', None)
262
262
263 def __getitem__(self, i):
263 def __getitem__(self, i):
264 if i == -1:
264 if i == -1:
265 return (0, 0, 0, -1, -1, -1, -1, nullid)
265 return (0, 0, 0, -1, -1, -1, -1, nullid)
266 return list.__getitem__(self, i)
266 return list.__getitem__(self, i)
267
267
268
268
269 class revlogoldio(object):
269 class revlogoldio(object):
270 def __init__(self):
270 def __init__(self):
271 self.size = indexformatv0.size
271 self.size = indexformatv0.size
272
272
273 def parseindex(self, data, inline):
273 def parseindex(self, data, inline):
274 s = self.size
274 s = self.size
275 index = []
275 index = []
276 nodemap = nodemaputil.NodeMap({nullid: nullrev})
276 nodemap = nodemaputil.NodeMap({nullid: nullrev})
277 n = off = 0
277 n = off = 0
278 l = len(data)
278 l = len(data)
279 while off + s <= l:
279 while off + s <= l:
280 cur = data[off : off + s]
280 cur = data[off : off + s]
281 off += s
281 off += s
282 e = indexformatv0_unpack(cur)
282 e = indexformatv0_unpack(cur)
283 # transform to revlogv1 format
283 # transform to revlogv1 format
284 e2 = (
284 e2 = (
285 offset_type(e[0], 0),
285 offset_type(e[0], 0),
286 e[1],
286 e[1],
287 -1,
287 -1,
288 e[2],
288 e[2],
289 e[3],
289 e[3],
290 nodemap.get(e[4], nullrev),
290 nodemap.get(e[4], nullrev),
291 nodemap.get(e[5], nullrev),
291 nodemap.get(e[5], nullrev),
292 e[6],
292 e[6],
293 )
293 )
294 index.append(e2)
294 index.append(e2)
295 nodemap[e[6]] = n
295 nodemap[e[6]] = n
296 n += 1
296 n += 1
297
297
298 index = revlogoldindex(index)
298 index = revlogoldindex(index)
299 return index, None
299 return index, None
300
300
301 def packentry(self, entry, node, version, rev):
301 def packentry(self, entry, node, version, rev):
302 if gettype(entry[0]):
302 if gettype(entry[0]):
303 raise error.RevlogError(
303 raise error.RevlogError(
304 _(b'index entry flags need revlog version 1')
304 _(b'index entry flags need revlog version 1')
305 )
305 )
306 e2 = (
306 e2 = (
307 getoffset(entry[0]),
307 getoffset(entry[0]),
308 entry[1],
308 entry[1],
309 entry[3],
309 entry[3],
310 entry[4],
310 entry[4],
311 node(entry[5]),
311 node(entry[5]),
312 node(entry[6]),
312 node(entry[6]),
313 entry[7],
313 entry[7],
314 )
314 )
315 return indexformatv0_pack(*e2)
315 return indexformatv0_pack(*e2)
316
316
317
317
318 # index ng:
318 # index ng:
319 # 6 bytes: offset
319 # 6 bytes: offset
320 # 2 bytes: flags
320 # 2 bytes: flags
321 # 4 bytes: compressed length
321 # 4 bytes: compressed length
322 # 4 bytes: uncompressed length
322 # 4 bytes: uncompressed length
323 # 4 bytes: base rev
323 # 4 bytes: base rev
324 # 4 bytes: link rev
324 # 4 bytes: link rev
325 # 4 bytes: parent 1 rev
325 # 4 bytes: parent 1 rev
326 # 4 bytes: parent 2 rev
326 # 4 bytes: parent 2 rev
327 # 32 bytes: nodeid
327 # 32 bytes: nodeid
328 indexformatng = struct.Struct(b">Qiiiiii20s12x")
328 indexformatng = struct.Struct(b">Qiiiiii20s12x")
329 indexformatng_pack = indexformatng.pack
329 indexformatng_pack = indexformatng.pack
330 versionformat = struct.Struct(b">I")
330 versionformat = struct.Struct(b">I")
331 versionformat_pack = versionformat.pack
331 versionformat_pack = versionformat.pack
332 versionformat_unpack = versionformat.unpack
332 versionformat_unpack = versionformat.unpack
333
333
334 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
334 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
335 # signed integer)
335 # signed integer)
336 _maxentrysize = 0x7FFFFFFF
336 _maxentrysize = 0x7FFFFFFF
337
337
338
338
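# Hedged illustration of the "ng" record layout documented above: a 64-byte
# record whose leading quad is produced by offset_type(), with 12 trailing
# pad bytes after the 20-byte node id. The entry values are placeholders.
def _demo_ng_entry():
    assert indexformatng.size == 8 + 6 * 4 + 20 + 12
    entry = (offset_type(0, 0), 0, 0, -1, 0, -1, -1, b'\x00' * 20)
    return indexformatng_pack(*entry)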
339 class revlogio(object):
339 class revlogio(object):
340 def __init__(self):
340 def __init__(self):
341 self.size = indexformatng.size
341 self.size = indexformatng.size
342
342
343 def parseindex(self, data, inline):
343 def parseindex(self, data, inline):
344 # call the C implementation to parse the index data
344 # call the C implementation to parse the index data
345 index, cache = parsers.parse_index2(data, inline)
345 index, cache = parsers.parse_index2(data, inline)
346 return index, cache
346 return index, cache
347
347
348 def packentry(self, entry, node, version, rev):
348 def packentry(self, entry, node, version, rev):
349 p = indexformatng_pack(*entry)
349 p = indexformatng_pack(*entry)
350 if rev == 0:
350 if rev == 0:
351 p = versionformat_pack(version) + p[4:]
351 p = versionformat_pack(version) + p[4:]
352 return p
352 return p
353
353
354
354
355 NodemapRevlogIO = None
356
357 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
358
359 class NodemapRevlogIO(revlogio):
360 """A debug oriented IO class that return a PersistentNodeMapIndexObject
361
362 The PersistentNodeMapIndexObject object is meant to test the persistent nodemap feature.
363 """
364
365 def parseindex(self, data, inline):
366 index, cache = parsers.parse_index_devel_nodemap(data, inline)
367 return index, cache
368
369
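# Hedged sketch: the C helper may be absent, in which case NodemapRevlogIO
# stays None, so callers must guard on the module-level name before using
# the debug IO class.
def _demo_pick_debug_io():
    if NodemapRevlogIO is not None:
        return NodemapRevlogIO()
    return revlogio()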
355 class rustrevlogio(revlogio):
370 class rustrevlogio(revlogio):
356 def parseindex(self, data, inline):
371 def parseindex(self, data, inline):
357 index, cache = super(rustrevlogio, self).parseindex(data, inline)
372 index, cache = super(rustrevlogio, self).parseindex(data, inline)
358 return rustrevlog.MixedIndex(index), cache
373 return rustrevlog.MixedIndex(index), cache
359
374
360
375
361 class revlog(object):
376 class revlog(object):
362 """
377 """
363 the underlying revision storage object
378 the underlying revision storage object
364
379
365 A revlog consists of two parts, an index and the revision data.
380 A revlog consists of two parts, an index and the revision data.
366
381
367 The index is a file with a fixed record size containing
382 The index is a file with a fixed record size containing
368 information on each revision, including its nodeid (hash), the
383 information on each revision, including its nodeid (hash), the
369 nodeids of its parents, the position and offset of its data within
384 nodeids of its parents, the position and offset of its data within
370 the data file, and the revision it's based on. Finally, each entry
385 the data file, and the revision it's based on. Finally, each entry
371 contains a linkrev entry that can serve as a pointer to external
386 contains a linkrev entry that can serve as a pointer to external
372 data.
387 data.
373
388
374 The revision data itself is a linear collection of data chunks.
389 The revision data itself is a linear collection of data chunks.
375 Each chunk represents a revision and is usually represented as a
390 Each chunk represents a revision and is usually represented as a
376 delta against the previous chunk. To bound lookup time, runs of
391 delta against the previous chunk. To bound lookup time, runs of
377 deltas are limited to about 2 times the length of the original
392 deltas are limited to about 2 times the length of the original
378 version data. This makes retrieval of a version proportional to
393 version data. This makes retrieval of a version proportional to
379 its size, or O(1) relative to the number of revisions.
394 its size, or O(1) relative to the number of revisions.
380
395
381 Both pieces of the revlog are written to in an append-only
396 Both pieces of the revlog are written to in an append-only
382 fashion, which means we never need to rewrite a file to insert or
397 fashion, which means we never need to rewrite a file to insert or
383 remove data, and can use some simple techniques to avoid the need
398 remove data, and can use some simple techniques to avoid the need
384 for locking while reading.
399 for locking while reading.
385
400
386 If checkambig, indexfile is opened with checkambig=True at
401 If checkambig, indexfile is opened with checkambig=True at
387 writing, to avoid file stat ambiguity.
402 writing, to avoid file stat ambiguity.
388
403
389 If mmaplargeindex is True, and an mmapindexthreshold is set, the
404 If mmaplargeindex is True, and an mmapindexthreshold is set, the
390 index will be mmapped rather than read if it is larger than the
405 index will be mmapped rather than read if it is larger than the
391 configured threshold.
406 configured threshold.
392
407
393 If censorable is True, the revlog can have censored revisions.
408 If censorable is True, the revlog can have censored revisions.
394
409
395 If `upperboundcomp` is not None, this is the expected maximal gain from
410 If `upperboundcomp` is not None, this is the expected maximal gain from
396 compression for the data content.
411 compression for the data content.
397 """
412 """
398
413
399 _flagserrorclass = error.RevlogError
414 _flagserrorclass = error.RevlogError
400
415
401 def __init__(
416 def __init__(
402 self,
417 self,
403 opener,
418 opener,
404 indexfile,
419 indexfile,
405 datafile=None,
420 datafile=None,
406 checkambig=False,
421 checkambig=False,
407 mmaplargeindex=False,
422 mmaplargeindex=False,
408 censorable=False,
423 censorable=False,
409 upperboundcomp=None,
424 upperboundcomp=None,
410 persistentnodemap=False,
425 persistentnodemap=False,
411 ):
426 ):
412 """
427 """
413 create a revlog object
428 create a revlog object
414
429
415 opener is a function that abstracts the file opening operation
430 opener is a function that abstracts the file opening operation
416 and can be used to implement COW semantics or the like.
431 and can be used to implement COW semantics or the like.
417
432
418 """
433 """
419 self.upperboundcomp = upperboundcomp
434 self.upperboundcomp = upperboundcomp
420 self.indexfile = indexfile
435 self.indexfile = indexfile
421 self.datafile = datafile or (indexfile[:-2] + b".d")
436 self.datafile = datafile or (indexfile[:-2] + b".d")
422 self.nodemap_file = None
437 self.nodemap_file = None
423 if persistentnodemap:
438 if persistentnodemap:
424 self.nodemap_file = indexfile[:-2] + b".n"
439 self.nodemap_file = indexfile[:-2] + b".n"
425
440
426 self.opener = opener
441 self.opener = opener
427 # When True, indexfile is opened with checkambig=True at writing, to
442 # When True, indexfile is opened with checkambig=True at writing, to
428 # avoid file stat ambiguity.
443 # avoid file stat ambiguity.
429 self._checkambig = checkambig
444 self._checkambig = checkambig
430 self._mmaplargeindex = mmaplargeindex
445 self._mmaplargeindex = mmaplargeindex
431 self._censorable = censorable
446 self._censorable = censorable
432 # 3-tuple of (node, rev, text) for a raw revision.
447 # 3-tuple of (node, rev, text) for a raw revision.
433 self._revisioncache = None
448 self._revisioncache = None
434 # Maps rev to chain base rev.
449 # Maps rev to chain base rev.
435 self._chainbasecache = util.lrucachedict(100)
450 self._chainbasecache = util.lrucachedict(100)
436 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
451 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
437 self._chunkcache = (0, b'')
452 self._chunkcache = (0, b'')
438 # How much data to read and cache into the raw revlog data cache.
453 # How much data to read and cache into the raw revlog data cache.
439 self._chunkcachesize = 65536
454 self._chunkcachesize = 65536
440 self._maxchainlen = None
455 self._maxchainlen = None
441 self._deltabothparents = True
456 self._deltabothparents = True
442 self.index = None
457 self.index = None
443 # Mapping of partial identifiers to full nodes.
458 # Mapping of partial identifiers to full nodes.
444 self._pcache = {}
459 self._pcache = {}
446 self._compengine = b'zlib'
461 self._compengine = b'zlib'
447 self._compengineopts = {}
462 self._compengineopts = {}
448 self._maxdeltachainspan = -1
463 self._maxdeltachainspan = -1
449 self._withsparseread = False
464 self._withsparseread = False
450 self._sparserevlog = False
465 self._sparserevlog = False
451 self._srdensitythreshold = 0.50
466 self._srdensitythreshold = 0.50
452 self._srmingapsize = 262144
467 self._srmingapsize = 262144
453
468
454 # Make copy of flag processors so each revlog instance can support
469 # Make copy of flag processors so each revlog instance can support
455 # custom flags.
470 # custom flags.
456 self._flagprocessors = dict(flagutil.flagprocessors)
471 self._flagprocessors = dict(flagutil.flagprocessors)
457
472
458 # 2-tuple of file handles being used for active writing.
473 # 2-tuple of file handles being used for active writing.
459 self._writinghandles = None
474 self._writinghandles = None
460
475
461 self._loadindex()
476 self._loadindex()
462
477
463 def _loadindex(self):
478 def _loadindex(self):
464 mmapindexthreshold = None
479 mmapindexthreshold = None
465 opts = self.opener.options
480 opts = self.opener.options
466
481
467 if b'revlogv2' in opts:
482 if b'revlogv2' in opts:
468 newversionflags = REVLOGV2 | FLAG_INLINE_DATA
483 newversionflags = REVLOGV2 | FLAG_INLINE_DATA
469 elif b'revlogv1' in opts:
484 elif b'revlogv1' in opts:
470 newversionflags = REVLOGV1 | FLAG_INLINE_DATA
485 newversionflags = REVLOGV1 | FLAG_INLINE_DATA
471 if b'generaldelta' in opts:
486 if b'generaldelta' in opts:
472 newversionflags |= FLAG_GENERALDELTA
487 newversionflags |= FLAG_GENERALDELTA
473 elif b'revlogv0' in self.opener.options:
488 elif b'revlogv0' in self.opener.options:
474 newversionflags = REVLOGV0
489 newversionflags = REVLOGV0
475 else:
490 else:
476 newversionflags = REVLOG_DEFAULT_VERSION
491 newversionflags = REVLOG_DEFAULT_VERSION
477
492
478 if b'chunkcachesize' in opts:
493 if b'chunkcachesize' in opts:
479 self._chunkcachesize = opts[b'chunkcachesize']
494 self._chunkcachesize = opts[b'chunkcachesize']
480 if b'maxchainlen' in opts:
495 if b'maxchainlen' in opts:
481 self._maxchainlen = opts[b'maxchainlen']
496 self._maxchainlen = opts[b'maxchainlen']
482 if b'deltabothparents' in opts:
497 if b'deltabothparents' in opts:
483 self._deltabothparents = opts[b'deltabothparents']
498 self._deltabothparents = opts[b'deltabothparents']
484 self._lazydelta = bool(opts.get(b'lazydelta', True))
499 self._lazydelta = bool(opts.get(b'lazydelta', True))
485 self._lazydeltabase = False
500 self._lazydeltabase = False
486 if self._lazydelta:
501 if self._lazydelta:
487 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
502 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
488 if b'compengine' in opts:
503 if b'compengine' in opts:
489 self._compengine = opts[b'compengine']
504 self._compengine = opts[b'compengine']
490 if b'zlib.level' in opts:
505 if b'zlib.level' in opts:
491 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
506 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
492 if b'zstd.level' in opts:
507 if b'zstd.level' in opts:
493 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
508 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
494 if b'maxdeltachainspan' in opts:
509 if b'maxdeltachainspan' in opts:
495 self._maxdeltachainspan = opts[b'maxdeltachainspan']
510 self._maxdeltachainspan = opts[b'maxdeltachainspan']
496 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
511 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
497 mmapindexthreshold = opts[b'mmapindexthreshold']
512 mmapindexthreshold = opts[b'mmapindexthreshold']
498 self.hassidedata = bool(opts.get(b'side-data', False))
513 self.hassidedata = bool(opts.get(b'side-data', False))
499 if self.hassidedata:
514 if self.hassidedata:
500 self._flagprocessors[REVIDX_SIDEDATA] = sidedatautil.processors
515 self._flagprocessors[REVIDX_SIDEDATA] = sidedatautil.processors
501 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
516 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
502 withsparseread = bool(opts.get(b'with-sparse-read', False))
517 withsparseread = bool(opts.get(b'with-sparse-read', False))
503 # sparse-revlog forces sparse-read
518 # sparse-revlog forces sparse-read
504 self._withsparseread = self._sparserevlog or withsparseread
519 self._withsparseread = self._sparserevlog or withsparseread
505 if b'sparse-read-density-threshold' in opts:
520 if b'sparse-read-density-threshold' in opts:
506 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
521 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
507 if b'sparse-read-min-gap-size' in opts:
522 if b'sparse-read-min-gap-size' in opts:
508 self._srmingapsize = opts[b'sparse-read-min-gap-size']
523 self._srmingapsize = opts[b'sparse-read-min-gap-size']
509 if opts.get(b'enableellipsis'):
524 if opts.get(b'enableellipsis'):
510 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
525 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
511
526
512 # revlog v0 doesn't have flag processors
527 # revlog v0 doesn't have flag processors
513 for flag, processor in pycompat.iteritems(
528 for flag, processor in pycompat.iteritems(
514 opts.get(b'flagprocessors', {})
529 opts.get(b'flagprocessors', {})
515 ):
530 ):
516 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
531 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
517
532
518 if self._chunkcachesize <= 0:
533 if self._chunkcachesize <= 0:
519 raise error.RevlogError(
534 raise error.RevlogError(
520 _(b'revlog chunk cache size %r is not greater than 0')
535 _(b'revlog chunk cache size %r is not greater than 0')
521 % self._chunkcachesize
536 % self._chunkcachesize
522 )
537 )
523 elif self._chunkcachesize & (self._chunkcachesize - 1):
538 elif self._chunkcachesize & (self._chunkcachesize - 1):
524 raise error.RevlogError(
539 raise error.RevlogError(
525 _(b'revlog chunk cache size %r is not a power of 2')
540 _(b'revlog chunk cache size %r is not a power of 2')
526 % self._chunkcachesize
541 % self._chunkcachesize
527 )
542 )
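# n & (n - 1) clears the lowest set bit, so the expression above is zero
# exactly when n is a power of two: 65536 & 65535 == 0, but 65537 & 65536 != 0.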
528
543
529 indexdata = b''
544 indexdata = b''
530 self._initempty = True
545 self._initempty = True
531 try:
546 try:
532 with self._indexfp() as f:
547 with self._indexfp() as f:
533 if (
548 if (
534 mmapindexthreshold is not None
549 mmapindexthreshold is not None
535 and self.opener.fstat(f).st_size >= mmapindexthreshold
550 and self.opener.fstat(f).st_size >= mmapindexthreshold
536 ):
551 ):
537 # TODO: should .close() to release resources without
552 # TODO: should .close() to release resources without
538 # relying on Python GC
553 # relying on Python GC
539 indexdata = util.buffer(util.mmapread(f))
554 indexdata = util.buffer(util.mmapread(f))
540 else:
555 else:
541 indexdata = f.read()
556 indexdata = f.read()
542 if len(indexdata) > 0:
557 if len(indexdata) > 0:
543 versionflags = versionformat_unpack(indexdata[:4])[0]
558 versionflags = versionformat_unpack(indexdata[:4])[0]
544 self._initempty = False
559 self._initempty = False
545 else:
560 else:
546 versionflags = newversionflags
561 versionflags = newversionflags
547 except IOError as inst:
562 except IOError as inst:
548 if inst.errno != errno.ENOENT:
563 if inst.errno != errno.ENOENT:
549 raise
564 raise
550
565
551 versionflags = newversionflags
566 versionflags = newversionflags
552
567
553 self.version = versionflags
568 self.version = versionflags
554
569
555 flags = versionflags & ~0xFFFF
570 flags = versionflags & ~0xFFFF
556 fmt = versionflags & 0xFFFF
571 fmt = versionflags & 0xFFFF
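# e.g. the 4-byte header of an inline v1 revlog stores the version number in
# its low 16 bits and feature flags such as FLAG_INLINE_DATA in the high bits.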
557
572
558 if fmt == REVLOGV0:
573 if fmt == REVLOGV0:
559 if flags:
574 if flags:
560 raise error.RevlogError(
575 raise error.RevlogError(
561 _(b'unknown flags (%#04x) in version %d revlog %s')
576 _(b'unknown flags (%#04x) in version %d revlog %s')
562 % (flags >> 16, fmt, self.indexfile)
577 % (flags >> 16, fmt, self.indexfile)
563 )
578 )
564
579
565 self._inline = False
580 self._inline = False
566 self._generaldelta = False
581 self._generaldelta = False
567
582
568 elif fmt == REVLOGV1:
583 elif fmt == REVLOGV1:
569 if flags & ~REVLOGV1_FLAGS:
584 if flags & ~REVLOGV1_FLAGS:
570 raise error.RevlogError(
585 raise error.RevlogError(
571 _(b'unknown flags (%#04x) in version %d revlog %s')
586 _(b'unknown flags (%#04x) in version %d revlog %s')
572 % (flags >> 16, fmt, self.indexfile)
587 % (flags >> 16, fmt, self.indexfile)
573 )
588 )
574
589
575 self._inline = versionflags & FLAG_INLINE_DATA
590 self._inline = versionflags & FLAG_INLINE_DATA
576 self._generaldelta = versionflags & FLAG_GENERALDELTA
591 self._generaldelta = versionflags & FLAG_GENERALDELTA
577
592
578 elif fmt == REVLOGV2:
593 elif fmt == REVLOGV2:
579 if flags & ~REVLOGV2_FLAGS:
594 if flags & ~REVLOGV2_FLAGS:
580 raise error.RevlogError(
595 raise error.RevlogError(
581 _(b'unknown flags (%#04x) in version %d revlog %s')
596 _(b'unknown flags (%#04x) in version %d revlog %s')
582 % (flags >> 16, fmt, self.indexfile)
597 % (flags >> 16, fmt, self.indexfile)
583 )
598 )
584
599
585 self._inline = versionflags & FLAG_INLINE_DATA
600 self._inline = versionflags & FLAG_INLINE_DATA
586 # generaldelta implied by version 2 revlogs.
601 # generaldelta implied by version 2 revlogs.
587 self._generaldelta = True
602 self._generaldelta = True
588
603
589 else:
604 else:
590 raise error.RevlogError(
605 raise error.RevlogError(
591 _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
606 _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
592 )
607 )
593 # sparse-revlog can't be on without general-delta (issue6056)
608 # sparse-revlog can't be on without general-delta (issue6056)
594 if not self._generaldelta:
609 if not self._generaldelta:
595 self._sparserevlog = False
610 self._sparserevlog = False
596
611
597 self._storedeltachains = True
612 self._storedeltachains = True
598
613
614 devel_nodemap = (
615 self.nodemap_file
616 and opts.get(b'devel-force-nodemap', False)
617 and NodemapRevlogIO is not None
618 )
619
599 self._io = revlogio()
620 self._io = revlogio()
600 if self.version == REVLOGV0:
621 if self.version == REVLOGV0:
601 self._io = revlogoldio()
622 self._io = revlogoldio()
623 elif devel_nodemap:
624 self._io = NodemapRevlogIO()
602 elif rustrevlog is not None and self.opener.options.get(b'rust.index'):
625 elif rustrevlog is not None and self.opener.options.get(b'rust.index'):
603 self._io = rustrevlogio()
626 self._io = rustrevlogio()
604 try:
627 try:
605 d = self._io.parseindex(indexdata, self._inline)
628 d = self._io.parseindex(indexdata, self._inline)
606 except (ValueError, IndexError):
629 except (ValueError, IndexError):
607 raise error.RevlogError(
630 raise error.RevlogError(
608 _(b"index %s is corrupted") % self.indexfile
631 _(b"index %s is corrupted") % self.indexfile
609 )
632 )
610 self.index, self._chunkcache = d
633 self.index, self._chunkcache = d
611 if not self._chunkcache:
634 if not self._chunkcache:
612 self._chunkclear()
635 self._chunkclear()
613 # revnum -> (chain-length, sum-delta-length)
636 # revnum -> (chain-length, sum-delta-length)
614 self._chaininfocache = {}
637 self._chaininfocache = {}
615 # revlog header -> revlog compressor
638 # revlog header -> revlog compressor
616 self._decompressors = {}
639 self._decompressors = {}
617
640
618 @util.propertycache
641 @util.propertycache
619 def _compressor(self):
642 def _compressor(self):
620 engine = util.compengines[self._compengine]
643 engine = util.compengines[self._compengine]
621 return engine.revlogcompressor(self._compengineopts)
644 return engine.revlogcompressor(self._compengineopts)
622
645
623 def _indexfp(self, mode=b'r'):
646 def _indexfp(self, mode=b'r'):
624 """file object for the revlog's index file"""
647 """file object for the revlog's index file"""
625 args = {'mode': mode}
648 args = {'mode': mode}
626 if mode != b'r':
649 if mode != b'r':
627 args['checkambig'] = self._checkambig
650 args['checkambig'] = self._checkambig
628 if mode == b'w':
651 if mode == b'w':
629 args['atomictemp'] = True
652 args['atomictemp'] = True
630 return self.opener(self.indexfile, **args)
653 return self.opener(self.indexfile, **args)
631
654
632 def _datafp(self, mode=b'r'):
655 def _datafp(self, mode=b'r'):
633 """file object for the revlog's data file"""
656 """file object for the revlog's data file"""
634 return self.opener(self.datafile, mode=mode)
657 return self.opener(self.datafile, mode=mode)
635
658
636 @contextlib.contextmanager
659 @contextlib.contextmanager
637 def _datareadfp(self, existingfp=None):
660 def _datareadfp(self, existingfp=None):
638 """file object suitable to read data"""
661 """file object suitable to read data"""
639 # Use explicit file handle, if given.
662 # Use explicit file handle, if given.
640 if existingfp is not None:
663 if existingfp is not None:
641 yield existingfp
664 yield existingfp
642
665
643 # Use a file handle being actively used for writes, if available.
666 # Use a file handle being actively used for writes, if available.
644 # There is some danger to doing this because reads will seek the
667 # There is some danger to doing this because reads will seek the
645 # file. However, _writeentry() performs a SEEK_END before all writes,
668 # file. However, _writeentry() performs a SEEK_END before all writes,
646 # so we should be safe.
669 # so we should be safe.
647 elif self._writinghandles:
670 elif self._writinghandles:
648 if self._inline:
671 if self._inline:
649 yield self._writinghandles[0]
672 yield self._writinghandles[0]
650 else:
673 else:
651 yield self._writinghandles[1]
674 yield self._writinghandles[1]
652
675
653 # Otherwise open a new file handle.
676 # Otherwise open a new file handle.
654 else:
677 else:
655 if self._inline:
678 if self._inline:
656 func = self._indexfp
679 func = self._indexfp
657 else:
680 else:
658 func = self._datafp
681 func = self._datafp
659 with func() as fp:
682 with func() as fp:
660 yield fp
683 yield fp
661
684
662 def tiprev(self):
685 def tiprev(self):
663 return len(self.index) - 1
686 return len(self.index) - 1
664
687
665 def tip(self):
688 def tip(self):
666 return self.node(self.tiprev())
689 return self.node(self.tiprev())
667
690
668 def __contains__(self, rev):
691 def __contains__(self, rev):
669 return 0 <= rev < len(self)
692 return 0 <= rev < len(self)
670
693
671 def __len__(self):
694 def __len__(self):
672 return len(self.index)
695 return len(self.index)
673
696
674 def __iter__(self):
697 def __iter__(self):
675 return iter(pycompat.xrange(len(self)))
698 return iter(pycompat.xrange(len(self)))
676
699
677 def revs(self, start=0, stop=None):
700 def revs(self, start=0, stop=None):
678 """iterate over all rev in this revlog (from start to stop)"""
701 """iterate over all rev in this revlog (from start to stop)"""
679 return storageutil.iterrevs(len(self), start=start, stop=stop)
702 return storageutil.iterrevs(len(self), start=start, stop=stop)
680
703
681 @property
704 @property
682 def nodemap(self):
705 def nodemap(self):
683 msg = (
706 msg = (
684 b"revlog.nodemap is deprecated, "
707 b"revlog.nodemap is deprecated, "
685 b"use revlog.index.[has_node|rev|get_rev]"
708 b"use revlog.index.[has_node|rev|get_rev]"
686 )
709 )
687 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
710 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
688 return self.index.nodemap
711 return self.index.nodemap
689
712
690 @property
713 @property
691 def _nodecache(self):
714 def _nodecache(self):
692 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
715 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
693 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
716 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
694 return self.index.nodemap
717 return self.index.nodemap
695
718
696 def hasnode(self, node):
719 def hasnode(self, node):
697 try:
720 try:
698 self.rev(node)
721 self.rev(node)
699 return True
722 return True
700 except KeyError:
723 except KeyError:
701 return False
724 return False
702
725
703 def candelta(self, baserev, rev):
726 def candelta(self, baserev, rev):
704 """whether two revisions (baserev, rev) can be delta-ed or not"""
727 """whether two revisions (baserev, rev) can be delta-ed or not"""
705 # Disable delta if either rev requires a content-changing flag
728 # Disable delta if either rev requires a content-changing flag
706 # processor (ex. LFS). This is because such a flag processor can alter
729 # processor (ex. LFS). This is because such a flag processor can alter
707 # the rawtext content that the delta will be based on, and two clients
730 # the rawtext content that the delta will be based on, and two clients
708 # could have the same revlog node with different flags (i.e. different
731 # could have the same revlog node with different flags (i.e. different
709 # rawtext contents) and the delta could be incompatible.
732 # rawtext contents) and the delta could be incompatible.
710 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
733 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
711 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
734 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
712 ):
735 ):
713 return False
736 return False
714 return True
737 return True
715
738
716 def clearcaches(self):
739 def clearcaches(self):
717 self._revisioncache = None
740 self._revisioncache = None
718 self._chainbasecache.clear()
741 self._chainbasecache.clear()
719 self._chunkcache = (0, b'')
742 self._chunkcache = (0, b'')
720 self._pcache = {}
743 self._pcache = {}
721 self.index.clearcaches()
744 self.index.clearcaches()
722
745
723 def rev(self, node):
746 def rev(self, node):
724 try:
747 try:
725 return self.index.rev(node)
748 return self.index.rev(node)
726 except TypeError:
749 except TypeError:
727 raise
750 raise
728 except error.RevlogError:
751 except error.RevlogError:
729 # parsers.c radix tree lookup failed
752 # parsers.c radix tree lookup failed
730 if node == wdirid or node in wdirfilenodeids:
753 if node == wdirid or node in wdirfilenodeids:
731 raise error.WdirUnsupported
754 raise error.WdirUnsupported
732 raise error.LookupError(node, self.indexfile, _(b'no node'))
755 raise error.LookupError(node, self.indexfile, _(b'no node'))
733
756
734 # Accessors for index entries.
757 # Accessors for index entries.
735
758
736 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
759 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
737 # are flags.
760 # are flags.
738 def start(self, rev):
761 def start(self, rev):
739 return int(self.index[rev][0] >> 16)
762 return int(self.index[rev][0] >> 16)
740
763
741 def flags(self, rev):
764 def flags(self, rev):
742 return self.index[rev][0] & 0xFFFF
765 return self.index[rev][0] & 0xFFFF
743
766
744 def length(self, rev):
767 def length(self, rev):
745 return self.index[rev][1]
768 return self.index[rev][1]
746
769
747 def rawsize(self, rev):
770 def rawsize(self, rev):
748 """return the length of the uncompressed text for a given revision"""
771 """return the length of the uncompressed text for a given revision"""
749 l = self.index[rev][2]
772 l = self.index[rev][2]
750 if l >= 0:
773 if l >= 0:
751 return l
774 return l
752
775
753 t = self.rawdata(rev)
776 t = self.rawdata(rev)
754 return len(t)
777 return len(t)
755
778
756 def size(self, rev):
779 def size(self, rev):
757 """length of non-raw text (processed by a "read" flag processor)"""
780 """length of non-raw text (processed by a "read" flag processor)"""
758 # fast path: if no "read" flag processor could change the content,
781 # fast path: if no "read" flag processor could change the content,
759 # size is rawsize. note: ELLIPSIS is known to not change the content.
782 # size is rawsize. note: ELLIPSIS is known to not change the content.
760 flags = self.flags(rev)
783 flags = self.flags(rev)
761 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
784 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
762 return self.rawsize(rev)
785 return self.rawsize(rev)
763
786
764 return len(self.revision(rev, raw=False))
787 return len(self.revision(rev, raw=False))
765
788
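# Note on the fast path above: REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS removes
# ELLIPSIS from the known-flag mask, so a zero AND means no content-changing
# flag is set and rawsize() is already the answer.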
766 def chainbase(self, rev):
789 def chainbase(self, rev):
767 base = self._chainbasecache.get(rev)
790 base = self._chainbasecache.get(rev)
768 if base is not None:
791 if base is not None:
769 return base
792 return base
770
793
771 index = self.index
794 index = self.index
772 iterrev = rev
795 iterrev = rev
773 base = index[iterrev][3]
796 base = index[iterrev][3]
774 while base != iterrev:
797 while base != iterrev:
775 iterrev = base
798 iterrev = base
776 base = index[iterrev][3]
799 base = index[iterrev][3]
777
800
778 self._chainbasecache[rev] = base
801 self._chainbasecache[rev] = base
779 return base
802 return base
780
803
781 def linkrev(self, rev):
804 def linkrev(self, rev):
782 return self.index[rev][4]
805 return self.index[rev][4]
783
806
784 def parentrevs(self, rev):
807 def parentrevs(self, rev):
785 try:
808 try:
786 entry = self.index[rev]
809 entry = self.index[rev]
787 except IndexError:
810 except IndexError:
788 if rev == wdirrev:
811 if rev == wdirrev:
789 raise error.WdirUnsupported
812 raise error.WdirUnsupported
790 raise
813 raise
791
814
792 return entry[5], entry[6]
815 return entry[5], entry[6]
793
816
794 # fast parentrevs(rev) where rev isn't filtered
817 # fast parentrevs(rev) where rev isn't filtered
795 _uncheckedparentrevs = parentrevs
818 _uncheckedparentrevs = parentrevs
796
819
797 def node(self, rev):
820 def node(self, rev):
798 try:
821 try:
799 return self.index[rev][7]
822 return self.index[rev][7]
800 except IndexError:
823 except IndexError:
801 if rev == wdirrev:
824 if rev == wdirrev:
802 raise error.WdirUnsupported
825 raise error.WdirUnsupported
803 raise
826 raise
804
827
805 # Derived from index values.
828 # Derived from index values.
806
829
807 def end(self, rev):
830 def end(self, rev):
808 return self.start(rev) + self.length(rev)
831 return self.start(rev) + self.length(rev)
809
832
810 def parents(self, node):
833 def parents(self, node):
811 i = self.index
834 i = self.index
812 d = i[self.rev(node)]
835 d = i[self.rev(node)]
813 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
836 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
814
837
815 def chainlen(self, rev):
838 def chainlen(self, rev):
816 return self._chaininfo(rev)[0]
839 return self._chaininfo(rev)[0]
817
840
818 def _chaininfo(self, rev):
841 def _chaininfo(self, rev):
819 chaininfocache = self._chaininfocache
842 chaininfocache = self._chaininfocache
820 if rev in chaininfocache:
843 if rev in chaininfocache:
821 return chaininfocache[rev]
844 return chaininfocache[rev]
822 index = self.index
845 index = self.index
823 generaldelta = self._generaldelta
846 generaldelta = self._generaldelta
824 iterrev = rev
847 iterrev = rev
825 e = index[iterrev]
848 e = index[iterrev]
826 clen = 0
849 clen = 0
827 compresseddeltalen = 0
850 compresseddeltalen = 0
828 while iterrev != e[3]:
851 while iterrev != e[3]:
829 clen += 1
852 clen += 1
830 compresseddeltalen += e[1]
853 compresseddeltalen += e[1]
831 if generaldelta:
854 if generaldelta:
832 iterrev = e[3]
855 iterrev = e[3]
833 else:
856 else:
834 iterrev -= 1
857 iterrev -= 1
835 if iterrev in chaininfocache:
858 if iterrev in chaininfocache:
836 t = chaininfocache[iterrev]
859 t = chaininfocache[iterrev]
837 clen += t[0]
860 clen += t[0]
838 compresseddeltalen += t[1]
861 compresseddeltalen += t[1]
839 break
862 break
840 e = index[iterrev]
863 e = index[iterrev]
841 else:
864 else:
842 # Add text length of base since decompressing that also takes
865 # Add text length of base since decompressing that also takes
843 # work. For cache hits the length is already included.
866 # work. For cache hits the length is already included.
844 compresseddeltalen += e[1]
867 compresseddeltalen += e[1]
845 r = (clen, compresseddeltalen)
868 r = (clen, compresseddeltalen)
846 chaininfocache[rev] = r
869 chaininfocache[rev] = r
847 return r
870 return r
848
871
849 def _deltachain(self, rev, stoprev=None):
872 def _deltachain(self, rev, stoprev=None):
850 """Obtain the delta chain for a revision.
873 """Obtain the delta chain for a revision.
851
874
852 ``stoprev`` specifies a revision to stop at. If not specified, we
875 ``stoprev`` specifies a revision to stop at. If not specified, we
853 stop at the base of the chain.
876 stop at the base of the chain.
854
877
855 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
878 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
856 revs in ascending order and ``stopped`` is a bool indicating whether
879 revs in ascending order and ``stopped`` is a bool indicating whether
857 ``stoprev`` was hit.
880 ``stoprev`` was hit.
858 """
881 """
859 # Try C implementation.
882 # Try C implementation.
860 try:
883 try:
861 return self.index.deltachain(rev, stoprev, self._generaldelta)
884 return self.index.deltachain(rev, stoprev, self._generaldelta)
862 except AttributeError:
885 except AttributeError:
863 pass
886 pass
864
887
865 chain = []
888 chain = []
866
889
867 # Alias to prevent attribute lookup in tight loop.
890 # Alias to prevent attribute lookup in tight loop.
868 index = self.index
891 index = self.index
869 generaldelta = self._generaldelta
892 generaldelta = self._generaldelta
870
893
871 iterrev = rev
894 iterrev = rev
872 e = index[iterrev]
895 e = index[iterrev]
873 while iterrev != e[3] and iterrev != stoprev:
896 while iterrev != e[3] and iterrev != stoprev:
874 chain.append(iterrev)
897 chain.append(iterrev)
875 if generaldelta:
898 if generaldelta:
876 iterrev = e[3]
899 iterrev = e[3]
877 else:
900 else:
878 iterrev -= 1
901 iterrev -= 1
879 e = index[iterrev]
902 e = index[iterrev]
880
903
881 if iterrev == stoprev:
904 if iterrev == stoprev:
882 stopped = True
905 stopped = True
883 else:
906 else:
884 chain.append(iterrev)
907 chain.append(iterrev)
885 stopped = False
908 stopped = False
886
909
887 chain.reverse()
910 chain.reverse()
888 return chain, stopped
911 return chain, stopped
889
912
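# Hedged usage sketch (rl is a hypothetical revlog): a revision text is
# materialized by fetching the chain's base text and folding each delta over
# it in ascending order, e.g.
#     chain, stopped = rl._deltachain(rev)
#     # when `stopped` is False, chain[0] is the full-text base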
    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse revision order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        # first, make sure start revisions aren't filtered
        revs = list(revs)
        checkrev = self.node
        for r in revs:
            checkrev(r)
        # and we're sure ancestors aren't filtered as well

        if rustancestor is not None:
            lazyancestors = rustancestor.LazyAncestors
            arg = self.index
        elif util.safehasattr(parsers, b'rustlazyancestors'):
            lazyancestors = ancestor.rustlazyancestors
            arg = self.index
        else:
            lazyancestors = ancestor.lazyancestors
            arg = self._uncheckedparentrevs
        return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)

    def descendants(self, revs):
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if r not in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]

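    # Hedged example (added commentary, not in the original source): in a
    # linear history 0 -> 1 -> 2 -> 3, findcommonmissing with common
    # pointing at rev 1 and heads at rev 3 builds 'has' covering ::1
    # (revs 0 and 1, plus nullrev) and returns the missing nodes for
    # revs [2, 3], i.e. (::heads) - (::common), sorted by revision
    # number and therefore topologically.
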
    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        if rustancestor is not None:
            return rustancestor.MissingAncestors(self.index, common)
        return ancestor.incrementalmissingancestors(self.parentrevs, common)

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses the list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid]  # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n)  # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update(
                            [p for p in self.parents(n) if p != nullid]
                        )
                    elif n in heads:  # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants. (We seeded the descendants set with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in pycompat.iteritems(heads) if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

    def headrevs(self, revs=None):
        if revs is None:
            try:
                return self.index.headrevs()
            except AttributeError:
                return self._headrevs()
        if rustdagop is not None:
            return rustdagop.headrevs(self.index, revs)
        return dagop.headrevs(revs, self._uncheckedparentrevs)

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iterate over filtered revs, so no rev is a head at the start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1  # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0  # my parents are not
        return [r for r, val in enumerate(ishead) if val]

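    # Added note (not in the original source): ishead deliberately has
    # count + 1 slots so that writes through a nullrev (-1) parent land
    # in the extra final slot via Python's negative indexing; that slot
    # starts at 0 and is only ever set to 0, so it never appears in the
    # result. E.g. for two linear revs 0 -> 1 the loop leaves
    # ishead == [0, 1, 0] and only rev 1 is reported as a head.
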
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullrev
        else:
            start = self.rev(start)

        stoprevs = set(self.rev(n) for n in stop or [])

        revs = dagop.headrevssubset(
            self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
        )

        return [self.node(rev) for rev in revs]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        ancs = self._commonancestorsheads(a, b)
        return pycompat.maplist(self.node, ancs)

    def _commonancestorsheads(self, *revs):
        """calculate all the heads of the common ancestors of revs"""
        try:
            ancs = self.index.commonancestorsheads(*revs)
        except (AttributeError, OverflowError):  # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
        return ancs

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        A revision is considered an ancestor of itself."""
        a, b = self.rev(a), self.rev(b)
        return self.isancestorrev(a, b)

    def isancestorrev(self, a, b):
        """return True if revision a is an ancestor of revision b

        A revision is considered an ancestor of itself.

        The implementation of this is trivial but the use of
        reachableroots is not."""
        if a == nullrev:
            return True
        elif a == b:
            return True
        elif a > b:
            return False
        return bool(self.reachableroots(a, [b], [a], includepath=False))

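    # Added note (not in the original source): the a > b early-out in
    # isancestorrev is safe because revision numbers form a topological
    # order, so an ancestor always has a lower number than its
    # descendants. The final check asks reachableroots whether root a is
    # reachable from head b; e.g. in a linear history 0..3,
    # isancestorrev(1, 3) reduces to reachableroots(1, [3], [1]) being
    # non-empty.
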
    def reachableroots(self, minroot, heads, roots, includepath=False):
        """return (heads(::(<roots> and <roots>::<heads>)))

        If includepath is True, return (<roots>::<heads>)."""
        try:
            return self.index.reachableroots2(
                minroot, heads, roots, includepath
            )
        except AttributeError:
            return dagop._reachablerootspure(
                self.parentrevs, minroot, roots, heads, includepath
            )

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node)  # quick search the index
                return node
            except error.LookupError:
                pass  # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if b"%d" % rev != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, error.LookupError):
                pass

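    # Added commentary (not in the original source): _match tries the
    # unambiguous interpretations of 'id' in turn -- an integer revision,
    # a 20-byte binary node, a decimal byte string such as b'42' or
    # b'-1' (negative values count from the end), and finally a 40-digit
    # hex nodeid -- and implicitly returns None otherwise, so lookup()
    # can fall back to prefix matching via _partialmatch().
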
    def _partialmatch(self, id):
        # we don't care about wdirfilenodeids here, as they should always
        # be full hashes
        maybewdir = wdirhex.startswith(id)
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise error.RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise error.AmbiguousPrefixLookupError(
                    id, self.indexfile, _(b'ambiguous identifier')
                )
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2  # grab an even number of digits
                prefix = bin(id[: l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [
                    n for n in nl if hex(n).startswith(id) and self.hasnode(n)
                ]
                if nullhex.startswith(id):
                    nl.append(nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.indexfile, _(b'ambiguous identifier')
                    )
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
        - revision number or str(revision number)
        - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise error.LookupError(id, self.indexfile, _(b'no match found'))

    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""

        def isvalid(prefix):
            try:
                matchednode = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if matchednode is None:
                raise error.LookupError(node, self.indexfile, _(b'no node'))
            return True

        def maybewdir(prefix):
            return all(c == b'f' for c in pycompat.iterbytestr(prefix))

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != wdirid:
                    raise error.LookupError(node, self.indexfile, _(b'no node'))
            except AttributeError:
                # Fall through to pure code
                pass

        if node == wdirid:
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        for length in range(minlength, 41):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)

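    # Added note (not in the original source): wdirhex consists entirely
    # of 'f' characters, so any all-'f' prefix is ambiguous with the
    # virtual working-directory node. disambiguate() therefore extends a
    # candidate prefix until it contains a non-'f' digit; e.g. a node
    # whose hash starts with b'ffa9' can never be shortened below
    # b'ffa'.
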
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return storageutil.hashrevisionsha1(text, p1, p2) != node

    def _cachesegment(self, offset, data):
        """Add a segment to the revlog cache.

        Accepts an absolute offset and the data that is at that location.
        """
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

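    # Added note (not in the original source): the cache only grows when
    # the new segment starts exactly where the cached data ends and the
    # combined size stays under _chunksize; any gap, overlap, or
    # oversized result simply replaces the cached (offset, data) pair
    # with the new segment.
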
    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.

        Raises if the requested number of bytes could not be read.
        """
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (
            (offset + length + cachesize) & ~(cachesize - 1)
        ) - realoffset
        with self._datareadfp(df) as df:
            df.seek(realoffset)
            d = df.read(reallength)

        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            startoffset = offset - realoffset
            if len(d) - startoffset < length:
                raise error.RevlogError(
                    _(
                        b'partial read of revlog %s; expected %d bytes from '
                        b'offset %d, got %d'
                    )
                    % (
                        self.indexfile if self._inline else self.datafile,
                        length,
                        realoffset,
                        len(d) - startoffset,
                    )
                )

            return util.buffer(d, startoffset, length)

        if len(d) < length:
            raise error.RevlogError(
                _(
                    b'partial read of revlog %s; expected %d bytes from offset '
                    b'%d, got %d'
                )
                % (
                    self.indexfile if self._inline else self.datafile,
                    length,
                    offset,
                    len(d),
                )
            )

        return d

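    # Worked example (added commentary, not in the original source),
    # assuming the configured cache size is a power of two, which the
    # bit-masking relies on: with cachesize = 65536, a request for
    # offset=70000 and length=1000 rounds down to
    # realoffset = 70000 & ~65535 = 65536 and reads through the next
    # window boundary past the request:
    # reallength = ((71000 + 65536) & ~65535) - 65536 = 65536. Nearby
    # reads in either direction are then served from the cache.
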
    def _getsegment(self, offset, length, df=None):
        """Obtain a segment of raw data from the revlog.

        Accepts an absolute offset, length of bytes to obtain, and an
        optional file handle to the already-opened revlog. If the file
        handle is used, its original seek position will not be preserved.

        Requests for data may be returned from a cache.

        Returns a str or a buffer instance of raw byte data.
        """
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d  # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._readsegment(offset, length, df=df)

    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start

        return start, self._getsegment(start, length, df=df)

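    # Added note (not in the original source): index entry field 0 packs
    # the data offset and the revision flags into a single integer, so
    # the byte offset is recovered with >> 16. In inline revlogs the
    # data of revision r is physically preceded by the index entries for
    # revisions 0..r -- (r + 1) entries of self._io.size bytes each --
    # which is what the (rev + 1) * self._io.size adjustment accounts
    # for.
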
    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])

    def _chunks(self, revs, df=None, targetsize=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        if not self._withsparseread:
            slicedchunks = (revs,)
        else:
            slicedchunks = deltautil.slicechunk(
                self, revs, targetsize=targetsize
            )

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            for rev in revschunk:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

        return l

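    # Added sketch (not in the original source): with sparse reads
    # enabled, deltautil.slicechunk splits the requested revs into dense
    # runs so that one contiguous segment read covers each run; every
    # chunk is then carved out of that buffer at start(rev) - offset
    # (plus the inline index adjustment) and decompressed individually.
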
    def _chunkclear(self):
        """Clear the raw chunk cache."""
        self._chunkcache = (0, b'')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

    def issnapshot(self, rev):
        """tells whether rev is a snapshot"""
        if not self._sparserevlog:
            return self.deltaparent(rev) == nullrev
        elif util.safehasattr(self.index, b'issnapshot'):
            # directly assign the method to cache the testing and access
            self.issnapshot = self.index.issnapshot
            return self.issnapshot(rev)
        if rev == nullrev:
            return True
        entry = self.index[rev]
        base = entry[3]
        if base == rev:
            return True
        if base == nullrev:
            return True
        p1 = entry[5]
        p2 = entry[6]
        if base == p1 or base == p2:
            return False
        return self.issnapshot(base)

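    # Added note (not in the original source): under sparse-revlog a
    # revision counts as a snapshot if it deltas against null (a full
    # snapshot) or, recursively, against another snapshot that is not
    # one of its own parents (an intermediate snapshot); a delta whose
    # base is a parent is an ordinary delta, hence the early False.
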
    def snapshotdepth(self, rev):
        """number of snapshots in the chain before this one"""
        if not self.issnapshot(rev):
            raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
        return len(self._deltachain(rev)[0]) - 1

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._chunk(rev2))

        return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))

    def _processflags(self, text, flags, operation, raw=False):
        """deprecated entry point to access flag processors"""
        msg = b'_processflag(...) use the specialized variant'
        util.nouideprecwarn(msg, b'5.2', stacklevel=2)
        if raw:
            return text, flagutil.processflagsraw(self, text, flags)
        elif operation == b'read':
            return flagutil.processflagsread(self, text, flags)
        else:  # write operation
            return flagutil.processflagswrite(self, text, flags)

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        raw - an optional argument specifying if the revision data is to be
        treated as raw data when applying flag transforms. 'raw' should be set
        to True when generating changegroups or in debug commands.
        """
        if raw:
            msg = (
                b'revlog.revision(..., raw=True) is deprecated, '
                b'use revlog.rawdata(...)'
            )
            util.nouideprecwarn(msg, b'5.2', stacklevel=2)
        return self._revisiondata(nodeorrev, _df, raw=raw)[0]

    def sidedata(self, nodeorrev, _df=None):
        """a map of extra data related to the changeset but not part of the hash

        This function currently returns a dictionary. However, a more advanced
        mapping object will likely be used in the future for more
        efficient/lazy code.
        """
        return self._revisiondata(nodeorrev, _df)[1]

    def _revisiondata(self, nodeorrev, _df=None, raw=False):
        # deal with <nodeorrev> argument type
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        # fast path the special `nullid` rev
        if node == nullid:
            return b"", {}

        # ``rawtext`` is the text as stored inside the revlog. Might be the
        # revision or might need to be processed to retrieve the revision.
        rev, rawtext, validated = self._rawtext(node, rev, _df=_df)

        if raw and validated:
            # if we don't want to process the raw text and that raw
            # text is cached, we can exit early.
            return rawtext, {}
        if rev is None:
            rev = self.rev(node)
        # the revlog's flags for this revision
        # (these usually alter its state or content)
        flags = self.flags(rev)

        if validated and flags == REVIDX_DEFAULT_FLAGS:
            # no extra flags set, no flag processor runs, text = rawtext
            return rawtext, {}

        sidedata = {}
        if raw:
            validatehash = flagutil.processflagsraw(self, rawtext, flags)
            text = rawtext
        else:
            try:
                r = flagutil.processflagsread(self, rawtext, flags)
            except error.SidedataHashError as exc:
                msg = _(b"integrity check failed on %s:%s sidedata key %d")
                msg %= (self.indexfile, pycompat.bytestr(rev), exc.sidedatakey)
                raise error.RevlogError(msg)
            text, validatehash, sidedata = r
        if validatehash:
            self.checkhash(text, node, rev=rev)
        if not validated:
            self._revisioncache = (node, rev, rawtext)

        return text, sidedata

    def _rawtext(self, node, rev, _df=None):
        """return the possibly unvalidated rawtext for a revision

        returns (rev, rawtext, validated)
        """

        # revision in the cache (could be useful to apply delta)
        cachedrev = None
        # An intermediate text to apply deltas to
        basetext = None

        # Check if we have the entry in cache
        # The cache entry looks like (node, rev, rawtext)
        if self._revisioncache:
            if self._revisioncache[0] == node:
                return (rev, self._revisioncache[2], True)
            cachedrev = self._revisioncache[1]

        if rev is None:
            rev = self.rev(node)

        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1870 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1848 if stopped:
1871 if stopped:
1849 basetext = self._revisioncache[2]
1872 basetext = self._revisioncache[2]
1850
1873
1851 # drop cache to save memory, the caller is expected to
1874 # drop cache to save memory, the caller is expected to
1852 # update self._revisioncache after validating the text
1875 # update self._revisioncache after validating the text
1853 self._revisioncache = None
1876 self._revisioncache = None
1854
1877
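        # Cap how much data the chunk cache may read at once while
        # reconstructing this text; the 4x factor is a heuristic assuming
        # the compressed delta chain stays within four times the raw size.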
        targetsize = None
        rawsize = self.index[rev][2]
        if 0 <= rawsize:
            targetsize = 4 * rawsize

        bins = self._chunks(chain, df=_df, targetsize=targetsize)
        if basetext is None:
            basetext = bytes(bins[0])
            bins = bins[1:]

        rawtext = mdiff.patches(basetext, bins)
        del basetext  # let us have a chance to free memory early
        return (rev, rawtext, False)

    def rawdata(self, nodeorrev, _df=None):
        """return the uncompressed raw data of a given node or revision number.

        _df - an existing file handle to read from. (internal-only)
        """
        return self._revisiondata(nodeorrev, _df, raw=True)[0]

    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return storageutil.hashrevisionsha1(text, p1, p2)

    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        try:
            if p1 is None and p2 is None:
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                # Clear the revision cache on hash failure. The revision cache
                # only stores the raw revision and clearing the cache does have
                # the side-effect that we won't have a cache hit when the raw
                # revision data is accessed. But this case should be rare and
                # it is extra work to teach the cache about the hash
                # verification state.
                if self._revisioncache and self._revisioncache[0] == node:
                    self._revisioncache = None

                revornode = rev
                if revornode is None:
                    revornode = templatefilters.short(hex(node))
                raise error.RevlogError(
                    _(b"integrity check failed on %s:%s")
                    % (self.indexfile, pycompat.bytestr(revornode))
                )
        except error.RevlogError:
            if self._censorable and storageutil.iscensoredtext(text):
                raise error.CensoredNodeError(self.indexfile, node, text)
            raise

    def _enforceinlinesize(self, tr, fp=None):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        if (
            not self._inline
            or (self.start(tiprev) + self.length(tiprev)) < _maxinline
        ):
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise error.RevlogError(
                _(b"%s not found in the transaction") % self.indexfile
            )

        trindex = trinfo[2]
        if trindex is not None:
            dataoff = self.start(trindex)
        else:
            # revlog was stripped at start of transaction, use all leftover data
            trindex = len(self) - 1
            dataoff = self.end(tiprev)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()
            # We can't use the cached file handle after close(). So prevent
            # its usage.
            self._writinghandles = None

        with self._indexfp(b'r') as ifh, self._datafp(b'w') as dfh:
            for r in self:
                dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])

        with self._indexfp(b'w') as fp:
            self.version &= ~FLAG_INLINE_DATA
            self._inline = False
            io = self._io
            for i in self:
                e = io.packentry(self.index[i], self.node, self.version, i)
                fp.write(e)

            # the temp file replaces the real index when we exit the context
            # manager

        tr.replace(self.indexfile, trindex * self._io.size)
        nodemaputil.setup_persistent_nodemap(tr, self)
        self._chunkclear()

    def _nodeduplicatecallback(self, transaction, node):
        """called when trying to add a node already stored."""
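        # no-op by default; this serves as a hook that subclasses may
        # override to react when a duplicated node is added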

    def addrevision(
        self,
        text,
        transaction,
        link,
        p1,
        p2,
        cachedelta=None,
        node=None,
        flags=REVIDX_DEFAULT_FLAGS,
        deltacomputer=None,
        sidedata=None,
    ):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
        computed by default as hash(text, p1, p2), however subclasses might
        use a different hashing method (and override checkhash() in that case)
        flags - the known flags to set on the revision
        deltacomputer - an optional deltacomputer instance shared between
        multiple calls
        """
        if link == nullrev:
            raise error.RevlogError(
                _(b"attempted to add linkrev -1 to %s") % self.indexfile
            )

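        # keep the REVIDX_SIDEDATA flag consistent with whether the revision
        # actually carries sidedata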
        if sidedata is None:
            sidedata = {}
            flags = flags & ~REVIDX_SIDEDATA
        elif not self.hassidedata:
            raise error.ProgrammingError(
                _(b"trying to add sidedata to a revlog that does not support them")
            )
        else:
            flags |= REVIDX_SIDEDATA

        if flags:
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = flagutil.processflagswrite(
            self, text, flags, sidedata=sidedata
        )

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise error.RevlogError(
                _(
                    b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
                )
                % (self.indexfile, len(rawtext))
            )

        node = node or self.hash(rawtext, p1, p2)
        if self.index.has_node(node):
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(
            rawtext,
            transaction,
            link,
            p1,
            p2,
            node,
            flags,
            cachedelta=cachedelta,
            deltacomputer=deltacomputer,
        )

    def addrawrevision(
        self,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        deltacomputer=None,
    ):
        """add a raw revision with known flags, node and parents
        useful when reusing a revision not stored in this revlog (ex: received
        over wire, or read from an external bundle).
        """
        dfh = None
        if not self._inline:
            dfh = self._datafp(b"a+")
        ifh = self._indexfp(b"a+")
        try:
            return self._addrevision(
                node,
                rawtext,
                transaction,
                link,
                p1,
                p2,
                flags,
                cachedelta,
                ifh,
                dfh,
                deltacomputer=deltacomputer,
            )
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def compress(self, data):
        """Generate a possibly-compressed representation of data."""
        if not data:
            return b'', data

        compressed = self._compressor.compress(data)

        if compressed:
            # The revlog compressor added the header in the returned data.
            return b'', compressed

        if data[0:1] == b'\0':
            return b'', data
        return b'u', data

    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely be at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        if t == b'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise error.RevlogError(
                    _(b'revlog decompress error: %s')
                    % stringutil.forcebytestr(e)
                )
        # '\0' is more common than 'u' so it goes first.
        elif t == b'\0':
            return data
        elif t == b'u':
            return util.buffer(data, 1)

        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor(self._compengineopts)
                self._decompressors[t] = compressor
            except KeyError:
                raise error.RevlogError(_(b'unknown compression type %r') % t)

        return compressor.decompress(data)

    def _addrevision(
        self,
        node,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        flags,
        cachedelta,
        ifh,
        dfh,
        alwayscache=False,
        deltacomputer=None,
    ):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.

        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

        if "deltacomputer" is not provided or None, a defaultdeltacomputer will
        be used.

        invariants:
        - rawtext is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        if node == nullid:
            raise error.RevlogError(
                _(b"%s: attempt to add null revision") % self.indexfile
            )
        if node == wdirid or node in wdirfilenodeids:
            raise error.RevlogError(
                _(b"%s: attempt to add wdir revision") % self.indexfile
            )

        if self._inline:
            fh = ifh
        else:
            fh = dfh

        btext = [rawtext]

        curr = len(self)
        prev = curr - 1
        offset = self.end(prev)
        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if rawtext is None:
            # need rawtext size, before changed by flag processors, which is
            # the non-raw size. use revlog explicitly to avoid filelog's extra
            # logic that might remove metadata size.
            textlen = mdiff.patchedsize(
                revlog.size(self, cachedelta[0]), cachedelta[1]
            )
        else:
            textlen = len(rawtext)

        if deltacomputer is None:
            deltacomputer = deltautil.deltacomputer(self)

        revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)

        deltainfo = deltacomputer.finddeltainfo(revinfo, fh)

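        # the new index entry: packed (offset, flags), length of the stored
        # (compressed) delta, raw text length, delta base revision, linkrev,
        # parent revisions, and node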
        e = (
            offset_type(offset, flags),
            deltainfo.deltalen,
            textlen,
            deltainfo.base,
            link,
            p1r,
            p2r,
            node,
        )
        self.index.append(e)

        entry = self._io.packentry(e, self.node, self.version, curr)
        self._writeentry(
            transaction, ifh, dfh, entry, deltainfo.data, link, offset
        )

        rawtext = btext[0]

        if alwayscache and rawtext is None:
            rawtext = deltacomputer.buildtext(revinfo, fh)

        if type(rawtext) == bytes:  # only accept immutable objects
            self._revisioncache = (node, curr, rawtext)
        self._chainbasecache[curr] = deltainfo.chainbase
        return node

    def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        ifh.seek(0, os.SEEK_END)
        if dfh:
            dfh.seek(0, os.SEEK_END)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            ifh.write(entry)
        else:
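            # inline: index entries and data chunks are interleaved in the
            # index file, so account for the space taken up by the index
            # entries written so far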
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
        self._enforceinlinesize(transaction, ifh)
        nodemaputil.setup_persistent_nodemap(transaction, self)

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """

        if self._writinghandles:
            raise error.ProgrammingError(b'cannot nest addgroup() calls')

        nodes = []

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self._indexfp(b"a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self._datafp(b"a+")

        def flush():
            if dfh:
                dfh.flush()
            ifh.flush()

        self._writinghandles = (ifh, dfh)

        try:
            deltacomputer = deltautil.deltacomputer(self)
            # loop through our set of deltas
            for data in deltas:
                node, p1, p2, linknode, deltabase, delta, flags = data
                link = linkmapper(linknode)
                flags = flags or REVIDX_DEFAULT_FLAGS

                nodes.append(node)

                if self.index.has_node(node):
                    self._nodeduplicatecallback(transaction, node)
                    # this can happen if two branches make the same change
                    continue

                for p in (p1, p2):
                    if not self.index.has_node(p):
                        raise error.LookupError(
                            p, self.indexfile, _(b'unknown parent')
                        )

                if not self.index.has_node(deltabase):
                    raise error.LookupError(
                        deltabase, self.indexfile, _(b'unknown delta base')
                    )

                baserev = self.rev(deltabase)

                if baserev != nullrev and self.iscensored(baserev):
                    # if base is censored, delta must be full replacement in a
                    # single patch operation
                    hlen = struct.calcsize(b">lll")
                    oldlen = self.rawsize(baserev)
                    newlen = len(delta) - hlen
                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                        raise error.CensoredBaseError(
                            self.indexfile, self.node(baserev)
                        )

                if not flags and self._peek_iscensored(baserev, delta, flush):
                    flags |= REVIDX_ISCENSORED

                # We assume consumers of addrevisioncb will want to retrieve
                # the added revision, which will require a call to
                # revision(). revision() will fast path if there is a cache
                # hit. So, we tell _addrevision() to always cache in this case.
                # We're only using addgroup() in the context of changegroup
                # generation so the revision data can always be handled as raw
                # by the flagprocessor.
                self._addrevision(
                    node,
                    None,
                    transaction,
                    link,
                    p1,
                    p2,
                    flags,
                    (baserev, delta),
                    ifh,
                    dfh,
                    alwayscache=bool(addrevisioncb),
                    deltacomputer=deltacomputer,
                )

                if addrevisioncb:
                    addrevisioncb(self, node)

                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self._datafp(b"a+")
                    ifh = self._indexfp(b"a+")
                    self._writinghandles = (ifh, dfh)
        finally:
            self._writinghandles = None

            if dfh:
                dfh.close()
            ifh.close()

        return nodes

    def iscensored(self, rev):
        """Check if a file revision is censored."""
        if not self._censorable:
            return False

        return self.flags(rev) & REVIDX_ISCENSORED

    def _peek_iscensored(self, baserev, delta, flush):
        """Quickly check if a delta produces a censored revision."""
        if not self._censorable:
            return False

        return storageutil.deltaiscensored(delta, baserev, self.rawsize)

    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        return storageutil.resolvestripinfo(
            minlink,
            len(self) - 1,
            self.headrevs(),
            self.linkrev,
            self.parentrevs,
        )

    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._revisioncache = None
        self._chaininfocache = {}
        self._chunkclear()

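        # drop the stripped entries, keeping the trailing sentinel entry
        # that stands for the null revision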
        del self.index[rev:-1]

    def checksize(self):
        """Check size of index and data files

        return a (dd, di) tuple.
        - dd: extra bytes for the "data" file
        - di: extra bytes for the "index" file

        A healthy revlog will return (0, 0).
        """
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, io.SEEK_END)
                actual = f.tell()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, io.SEEK_END)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
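                # inline revlogs store the data inside the index file, so
                # the expected size is the index entries plus all chunk data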
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
    ):
        if nodesorder not in (b'nodes', b'storage', b'linear', None):
            raise error.ProgrammingError(
                b'unhandled value for nodesorder: %s' % nodesorder
            )

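        # without general delta, deltas are made against the previous
        # revision in storage order, so that order keeps them reusable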
        if nodesorder is None and not self._generaldelta:
            nodesorder = b'storage'

        if (
            not self._storedeltachains
            and deltamode != repository.CG_DELTAMODE_PREV
        ):
            deltamode = repository.CG_DELTAMODE_FULL

        return storageutil.emitrevisions(
            self,
            nodes,
            nodesorder,
            revlogrevisiondelta,
            deltaparentfn=self.deltaparent,
            candeltafn=self.candelta,
            rawsizefn=self.rawsize,
            revdifffn=self.revdiff,
            flagsfn=self.flags,
            deltamode=deltamode,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
        )

    DELTAREUSEALWAYS = b'always'
    DELTAREUSESAMEREVS = b'samerevs'
    DELTAREUSENEVER = b'never'

    DELTAREUSEFULLADD = b'fulladd'

    DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}

    def clone(
        self,
        tr,
        destrevlog,
        addrevisioncb=None,
        deltareuse=DELTAREUSESAMEREVS,
        forcedeltabothparents=None,
        sidedatacompanion=None,
    ):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
            Deltas will always be reused (if possible), even if the destination
            revlog would not select the same revisions for the delta. This is
            the fastest mode of operation.
        DELTAREUSESAMEREVS
            Deltas will be reused if the destination revlog would pick the same
            revisions for the delta. This mode strikes a balance between speed
            and optimization.
        DELTAREUSENEVER
            Deltas will never be reused. This is the slowest mode of execution.
            This mode can be used to recompute deltas (e.g. if the diff/delta
            algorithm changes).
        DELTAREUSEFULLADD
            Revisions will be re-added as if they were new content. This is
            slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
            e.g. large file detection and handling.

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``forcedeltabothparents``
        argument controls whether to force compute deltas against both parents
        for merges. By default, the current default is used.

        If not None, `sidedatacompanion` is a callable that accepts two
        arguments:

            (srcrevlog, rev)

        and returns a triplet that controls changes to sidedata content from
        the old revision to the new clone result:

            (dropall, filterout, update)

        * if `dropall` is True, all sidedata should be dropped
        * `filterout` is a set of sidedata keys that should be dropped
        * `update` is a mapping of additional/new key -> value
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(
                _(b'value for deltareuse invalid: %s') % deltareuse
            )

        if len(destrevlog):
            raise ValueError(_(b'destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_(b'source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_(b'destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase control whether to reuse a cached delta,
        # if possible.
        oldlazydelta = destrevlog._lazydelta
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            self._clone(
                tr,
                destrevlog,
                addrevisioncb,
                deltareuse,
                forcedeltabothparents,
                sidedatacompanion,
            )

        finally:
            destrevlog._lazydelta = oldlazydelta
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd

    def _clone(
        self,
        tr,
        destrevlog,
        addrevisioncb,
        deltareuse,
        forcedeltabothparents,
        sidedatacompanion,
    ):
        """perform the core duty of `revlog.clone` after parameter processing"""
        deltacomputer = deltautil.deltacomputer(destrevlog)
        index = self.index
        for rev in self:
            entry = index[rev]

            # Some classes override linkrev to take filtered revs into
            # account. Use raw entry from index.
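            # entry[0] packs the byte offset and the revision flags together;
            # the low 16 bits hold the flags (see offset_type())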
2706 flags = entry[0] & 0xFFFF
2729 flags = entry[0] & 0xFFFF
2707 linkrev = entry[4]
2730 linkrev = entry[4]
2708 p1 = index[entry[5]][7]
2731 p1 = index[entry[5]][7]
2709 p2 = index[entry[6]][7]
2732 p2 = index[entry[6]][7]
2710 node = entry[7]
2733 node = entry[7]
2711
2734
2712 sidedataactions = (False, [], {})
2735 sidedataactions = (False, [], {})
2713 if sidedatacompanion is not None:
2736 if sidedatacompanion is not None:
2714 sidedataactions = sidedatacompanion(self, rev)
2737 sidedataactions = sidedatacompanion(self, rev)
2715
2738
2716 # (Possibly) reuse the delta from the revlog if allowed and
2739 # (Possibly) reuse the delta from the revlog if allowed and
2717 # the revlog chunk is a delta.
2740 # the revlog chunk is a delta.
2718 cachedelta = None
2741 cachedelta = None
2719 rawtext = None
2742 rawtext = None
2720 if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
2743 if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
2721 dropall, filterout, update = sidedataactions
2744 dropall, filterout, update = sidedataactions
2722 text, sidedata = self._revisiondata(rev)
2745 text, sidedata = self._revisiondata(rev)
2723 if dropall:
2746 if dropall:
2724 sidedata = {}
2747 sidedata = {}
2725 for key in filterout:
2748 for key in filterout:
2726 sidedata.pop(key, None)
2749 sidedata.pop(key, None)
2727 sidedata.update(update)
2750 sidedata.update(update)
2728 if not sidedata:
2751 if not sidedata:
2729 sidedata = None
2752 sidedata = None
2730 destrevlog.addrevision(
2753 destrevlog.addrevision(
2731 text,
2754 text,
2732 tr,
2755 tr,
2733 linkrev,
2756 linkrev,
2734 p1,
2757 p1,
2735 p2,
2758 p2,
2736 cachedelta=cachedelta,
2759 cachedelta=cachedelta,
2737 node=node,
2760 node=node,
2738 flags=flags,
2761 flags=flags,
2739 deltacomputer=deltacomputer,
2762 deltacomputer=deltacomputer,
2740 sidedata=sidedata,
2763 sidedata=sidedata,
2741 )
2764 )
2742 else:
2765 else:
2743 if destrevlog._lazydelta:
2766 if destrevlog._lazydelta:
2744 dp = self.deltaparent(rev)
2767 dp = self.deltaparent(rev)
2745 if dp != nullrev:
2768 if dp != nullrev:
2746 cachedelta = (dp, bytes(self._chunk(rev)))
2769 cachedelta = (dp, bytes(self._chunk(rev)))
2747
2770
2748 if not cachedelta:
2771 if not cachedelta:
2749 rawtext = self.rawdata(rev)
2772 rawtext = self.rawdata(rev)
2750
2773
2751 ifh = destrevlog.opener(
2774 ifh = destrevlog.opener(
2752 destrevlog.indexfile, b'a+', checkambig=False
2775 destrevlog.indexfile, b'a+', checkambig=False
2753 )
2776 )
2754 dfh = None
2777 dfh = None
2755 if not destrevlog._inline:
2778 if not destrevlog._inline:
2756 dfh = destrevlog.opener(destrevlog.datafile, b'a+')
2779 dfh = destrevlog.opener(destrevlog.datafile, b'a+')
2757 try:
2780 try:
2758 destrevlog._addrevision(
2781 destrevlog._addrevision(
2759 node,
2782 node,
2760 rawtext,
2783 rawtext,
2761 tr,
2784 tr,
2762 linkrev,
2785 linkrev,
2763 p1,
2786 p1,
2764 p2,
2787 p2,
2765 flags,
2788 flags,
2766 cachedelta,
2789 cachedelta,
2767 ifh,
2790 ifh,
2768 dfh,
2791 dfh,
2769 deltacomputer=deltacomputer,
2792 deltacomputer=deltacomputer,
2770 )
2793 )
2771 finally:
2794 finally:
2772 if dfh:
2795 if dfh:
2773 dfh.close()
2796 dfh.close()
2774 ifh.close()
2797 ifh.close()
2775
2798
2776 if addrevisioncb:
2799 if addrevisioncb:
2777 addrevisioncb(self, rev, node)
2800 addrevisioncb(self, rev, node)
2778
2801
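For reference, the `sidedatacompanion` hook consumed above is called as `sidedatacompanion(srcrevlog, rev)` and must return the `(dropall, filterout, update)` triple that `_clone` unpacks; returning any truthy action forces the full-add path. A minimal sketch, with made-up key numbers:

    OLD_KEY = 1  # hypothetical sidedata key numbers, for illustration only
    NEW_KEY = 2

    def companion(srcrevlog, rev):
        dropall = False                     # keep existing sidedata ...
        filterout = [OLD_KEY]               # ... except this key
        update = {NEW_KEY: b'new payload'}  # and add/overwrite this one
        return dropall, filterout, update

    # then: srcrevlog.clone(tr, destrevlog, ..., sidedatacompanion=companion)
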
    def censorrevision(self, tr, censornode, tombstone=b''):
        if (self.version & 0xFFFF) == REVLOGV0:
            raise error.RevlogError(
                _(b'cannot censor with version %d revlogs') % self.version
            )

        censorrev = self.rev(censornode)
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        if len(tombstone) > self.rawsize(censorrev):
            raise error.Abort(
                _(b'censor tombstone must be no longer than censored data')
            )

        # Rewriting the revlog in place is hard. Our strategy for censoring is
        # to create a new revlog, copy all revisions to it, then replace the
        # revlogs on transaction close.

        newindexfile = self.indexfile + b'.tmpcensored'
        newdatafile = self.datafile + b'.tmpcensored'

        # This is a bit dangerous. We could easily have a mismatch of state.
        newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True)
        newrl.version = self.version
        newrl._generaldelta = self._generaldelta
        newrl._io = self._io

        for rev in self.revs():
            node = self.node(rev)
            p1, p2 = self.parents(node)

            if rev == censorrev:
                newrl.addrawrevision(
                    tombstone,
                    tr,
                    self.linkrev(censorrev),
                    p1,
                    p2,
                    censornode,
                    REVIDX_ISCENSORED,
                )

                if newrl.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'censored revision stored as delta; '
                            b'cannot censor'
                        ),
                        hint=_(
                            b'censoring of revlogs is not '
                            b'fully implemented; please report '
                            b'this bug'
                        ),
                    )
                continue

            if self.iscensored(rev):
                if self.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'cannot censor due to censored '
                            b'revision having delta stored'
                        )
                    )
                rawtext = self._chunk(rev)
            else:
                rawtext = self.rawdata(rev)

            newrl.addrawrevision(
                rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
            )

        tr.addbackup(self.indexfile, location=b'store')
        if not self._inline:
            tr.addbackup(self.datafile, location=b'store')

        self.opener.rename(newrl.indexfile, self.indexfile)
        if not self._inline:
            self.opener.rename(newrl.datafile, self.datafile)

        self.clearcaches()
        self._loadindex()

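The tombstone above is framed by `storageutil.packmeta`, which, as I read the storage helpers, produces the `\1\n`-delimited metadata envelope that filelog uses. A hand-rolled sketch of that framing (not the real helper), to show why the length check against `rawsize(censorrev)` works:

    def pack_meta_sketch(meta, text):
        # assumed framing: \1\n<key>: <value>\n...\1\n<text>
        metatext = b''.join(b'%s: %s\n' % (k, meta[k]) for k in sorted(meta))
        return b'\x01\n%s\x01\n%s' % (metatext, text)

    # an empty tombstone then costs 15 bytes, and must fit in the space the
    # censored payload already occupies:
    assert pack_meta_sketch({b'censored': b''}, b'') == b'\x01\ncensored: \n\x01\n'
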
    def verifyintegrity(self, state):
        """Verifies the integrity of the revlog.

        Yields ``revlogproblem`` instances describing problems that are
        found.
        """
        dd, di = self.checksize()
        if dd:
            yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
        if di:
            yield revlogproblem(error=_(b'index contains %d extra bytes') % di)

        version = self.version & 0xFFFF

        # The verifier tells us what version revlog we should be.
        if version != state[b'expectedversion']:
            yield revlogproblem(
                warning=_(b"warning: '%s' uses revlog format %d; expected %d")
                % (self.indexfile, version, state[b'expectedversion'])
            )

        state[b'skipread'] = set()
        state[b'safe_renamed'] = set()

        for rev in self:
            node = self.node(rev)

            # Verify contents. 4 cases to care about:
            #
            #   common: the most common case
            #   rename: with a rename
            #   meta:   file content starts with b'\1\n', the metadata
            #           header defined in filelog.py, but without a rename
            #   ext:    content stored externally
            #
            # More formally, their differences are shown below:
            #
            #                       | common | rename | meta  | ext
            #  ------------------------------------------------------
            #  flags()              | 0      | 0      | 0     | not 0
            #  renamed()            | False  | True   | False | ?
            #  rawtext[0:2]=='\1\n' | False  | True   | True  | ?
            #
            # "rawtext" means the raw text stored in revlog data, which
            # could be retrieved by "rawdata(rev)". "text" mentioned below
            # is "revision(rev)".
            #
            # There are 3 different lengths stored physically:
            #  1. L1: rawsize, stored in revlog index
            #  2. L2: len(rawtext), stored in revlog data
            #  3. L3: len(text), stored in revlog data if flags==0, or
            #     possibly somewhere else if flags!=0
            #
            # L1 should be equal to L2. L3 could be different from them.
            # "text" may or may not affect commit hash depending on flag
            # processors (see flagutil.addflagprocessor).
            #
            #              | common | rename | meta  | ext
            # -------------------------------------------------
            # rawsize()    | L1     | L1     | L1    | L1
            # size()       | L1     | L2-LM  | L1(*) | L1 (?)
            # len(rawtext) | L2     | L2     | L2    | L2
            # len(text)    | L2     | L2     | L2    | L3
            # len(read())  | L2     | L2-LM  | L2-LM | L3 (?)
            #
            # LM:  length of metadata, depending on rawtext
            # (*): not ideal, see comment in filelog.size
            # (?): could be "- len(meta)" if the resolved content has
            #      rename metadata
            #
            # Checks needed to be done:
            #  1. length check: L1 == L2, in all cases.
            #  2. hash check: depending on flag processor, we may need to
            #     use either "text" (external), or "rawtext" (in revlog).

            try:
                skipflags = state.get(b'skipflags', 0)
                if skipflags:
                    skipflags &= self.flags(rev)

                _verify_revision(self, skipflags, state, node)

                l1 = self.rawsize(rev)
                l2 = len(self.rawdata(node))

                if l1 != l2:
                    yield revlogproblem(
                        error=_(b'unpacked size is %d, %d expected') % (l2, l1),
                        node=node,
                    )

            except error.CensoredNodeError:
                if state[b'erroroncensored']:
                    yield revlogproblem(
                        error=_(b'censored file data'), node=node
                    )
                state[b'skipread'].add(node)
            except Exception as e:
                yield revlogproblem(
                    error=_(b'unpacking %s: %s')
                    % (short(node), stringutil.forcebytestr(e)),
                    node=node,
                )
                state[b'skipread'].add(node)

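To make the length table above concrete, here is a worked example of the rename/meta cases under the same assumed `\1\n` envelope: the stored rawtext keeps L1 == L2, while a `read()`-style accessor strips LM bytes of header. The parser below is illustrative, not filelog's actual code:

    def split_meta_sketch(rawtext):
        # rawtext beginning with \1\n carries a metadata header; everything
        # up to the closing \1\n is metadata, the remainder is file content.
        if not rawtext.startswith(b'\x01\n'):
            return {}, rawtext
        end = rawtext.index(b'\x01\n', 2)
        meta = {}
        for line in rawtext[2:end].splitlines():
            key, value = line.split(b': ', 1)
            meta[key] = value
        return meta, rawtext[end + 2 :]

    rawtext = b'\x01\ncopy: a.txt\n\x01\nfile body\n'
    meta, text = split_meta_sketch(rawtext)
    l2 = len(rawtext)     # L2 == L1: what the revlog stores, 26 bytes here
    lm = l2 - len(text)   # LM: envelope overhead stripped by read()
    assert meta == {b'copy': b'a.txt'} and text == b'file body\n' and lm == 16
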
    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        d = {}

        if exclusivefiles:
            d[b'exclusivefiles'] = [(self.opener, self.indexfile)]
            if not self._inline:
                d[b'exclusivefiles'].append((self.opener, self.datafile))

        if sharedfiles:
            d[b'sharedfiles'] = []

        if revisionscount:
            d[b'revisionscount'] = len(self)

        if trackedsize:
            d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))

        if storedsize:
            d[b'storedsize'] = sum(
                self.opener.stat(path).st_size for path in self.files()
            )

        return d
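
A short usage sketch for `storageinfo`: callers opt in to each figure, which matters because `trackedsize` iterates every revision and `storedsize` stats every file. The `rl` and `ui` names here are hypothetical:

    info = rl.storageinfo(revisionscount=True, trackedsize=True, storedsize=True)
    ui.write(
        b'%d revisions, %d bytes tracked, %d bytes stored\n'
        % (info[b'revisionscount'], info[b'trackedsize'], info[b'storedsize'])
    )
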
@@ -1,48 +1,50 @@
 ===================================
 Test the persistent on-disk nodemap
 ===================================
 
 
   $ hg init test-repo
   $ cd test-repo
   $ cat << EOF >> .hg/hgrc
   > [experimental]
   > exp-persistent-nodemap=yes
+  > [devel]
+  > persistent-nodemap=yes
   > EOF
   $ hg debugbuilddag .+5000
   $ f --size .hg/store/00changelog.n
   .hg/store/00changelog.n: size=18
   $ f --sha256 .hg/store/00changelog-*.nd
   .hg/store/00changelog-????????????????.nd: sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7 (glob)
   $ hg debugnodemap --dump-new | f --sha256 --size
   size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
   $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
   size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
   0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
   0010: ff ff ff ff ff ff ff ff ff ff fa c2 ff ff ff ff |................|
   0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
   0030: ff ff ff ff ff ff ed b3 ff ff ff ff ff ff ff ff |................|
   0040: ff ff ff ff ff ff ee 34 00 00 00 00 ff ff ff ff |.......4........|
   0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
   0060: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
   0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
   0080: ff ff ff ff ff ff f8 50 ff ff ff ff ff ff ff ff |.......P........|
   0090: ff ff ff ff ff ff ff ff ff ff ec c7 ff ff ff ff |................|
   00a0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
   00b0: ff ff ff ff ff ff fa be ff ff f2 fc ff ff ff ff |................|
   00c0: ff ff ff ff ff ff ef ea ff ff ff ff ff ff f9 17 |................|
   00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
   00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
   00f0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
 
 add a new commit
 
   $ hg up
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ echo foo > foo
   $ hg add foo
   $ hg ci -m 'foo'
   $ f --size .hg/store/00changelog.n
   .hg/store/00changelog.n: size=18
   $ f --sha256 .hg/store/00changelog-*.nd --size
   .hg/store/00changelog-????????????????.nd: size=122880, sha256=bfafebd751c4f6d116a76a37a1dee2a251747affe7efbcc4f4842ccc746d4db9 (glob)
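
The `--dump-disk` hexdump above can be decoded by hand. Assuming the block layout this series uses (sixteen big-endian signed 32-bit slots per 64-byte block; -1 marking an empty slot, other negative values encoding a revision as `-v - 2`, non-negative values pointing at a child block, with the root assumed to be the last block in the file), a minimal reader looks like the sketch below. Under that reading, the slot at offset 0x18 above (bytes ff ff fa c2) is -1342, i.e. revision 1340.

    import struct

    S_BLOCK = struct.Struct('>16l')  # one 64-byte block: 16 signed be32 slots

    def decode_block(data, block_idx):
        """Map hex nibble -> ('rev', number) or ('block', child index)."""
        raw = data[block_idx * 64 : (block_idx + 1) * 64]
        out = {}
        for nibble, value in enumerate(S_BLOCK.unpack(raw)):
            if value == -1:
                continue  # empty slot
            if value < 0:
                out['%x' % nibble] = ('rev', -value - 2)
            else:
                out['%x' % nibble] = ('block', value)
        return out

    # hypothetical path; the real file name embeds a 16-character uid
    with open('.hg/store/00changelog-<uid>.nd', 'rb') as fp:
        data = fp.read()
    root = decode_block(data, len(data) // 64 - 1)  # assumed root position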